Compare commits


2 Commits

91 changed files with 1825 additions and 2642 deletions


@@ -132,7 +132,7 @@
       "filename": "core/authelia.yml",
       "hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
       "is_verified": false,
-      "line_number": 201,
+      "line_number": 185,
       "is_secret": false
     }
   ],
@@ -187,5 +187,5 @@
       }
     ]
   },
-  "generated_at": "2024-08-30T18:12:43Z"
+  "generated_at": "2023-08-24T20:00:24Z"
 }

.terraform.lock.hcl (generated)

@@ -2,39 +2,39 @@
 # Manual edits may be lost in future updates.

 provider "registry.terraform.io/hashicorp/nomad" {
-  version = "2.2.0"
+  version = "2.0.0"
   hashes = [
-    "h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
-    "zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
-    "zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
-    "zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
-    "zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
-    "zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
-    "zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
-    "zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
+    "h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
+    "zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
+    "zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
+    "zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
-    "zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
-    "zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
-    "zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
+    "zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
+    "zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
+    "zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
+    "zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
+    "zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
+    "zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
+    "zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
+    "zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
   ]
 }

 provider "registry.terraform.io/hashicorp/random" {
-  version = "3.6.0"
+  version = "3.5.1"
   hashes = [
-    "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
-    "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
-    "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
-    "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
-    "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
-    "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
+    "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
+    "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
+    "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
+    "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
+    "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
+    "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
-    "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
-    "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
-    "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
-    "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
-    "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
+    "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
+    "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
+    "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
+    "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
+    "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
+    "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
   ]
 }


@@ -87,16 +87,6 @@ apply:
 		-auto-approve \
 		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \

-.PHONY: refresh
-refresh:
-	@terraform refresh \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-
-.PHONY: destroy
-destroy:
-	@terraform destroy \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-
 .PHONY: clean
 clean:
 	env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \


@@ -1,6 +1,6 @@
 resource "nomad_acl_policy" "anon_policy" {
   name        = "anonymous"
-  description = "Anon read only"
+  description = "Anon RO"
   rules_hcl   = file("${path.module}/nomad-anon-policy.hcl")
 }


@@ -1,72 +1,62 @@
 ---
 all:
-  hosts:
-    n1.thefij:
-      nomad_node_class: ingress
-      nomad_reserved_memory: 1024
-      # nomad_meta:
-      #   hw_transcode.device: /dev/dri
-      #   hw_transcode.type: intel
-      nfs_mounts:
-        - src: 10.50.250.2:/srv/volumes
-          path: /srv/volumes/moxy
-          opts: proto=tcp,rw
-      nomad_unique_host_volumes:
-        - name: mysql-data
-          path: /srv/volumes/mysql
-          owner: "999"
-          group: "100"
-          mode: "0755"
-          read_only: false
-        - name: postgres-data
-          path: /srv/volumes/postgres
-          owner: "999"
-          group: "999"
-          mode: "0755"
-          read_only: false
-    # n2.thefij:
-    #   nomad_node_class: ingress
-    #   nomad_reserved_memory: 1024
-    #   nfs_mounts:
-    #     - src: 10.50.250.2:/srv/volumes
-    #       path: /srv/volumes/moxy
-    #       opts: proto=tcp,rw
-    #   nomad_unique_host_volumes:
-    #     - name: nextcloud-data
-    #       path: /srv/volumes/nextcloud
-    #       owner: "root"
-    #       group: "bin"
-    #       mode: "0755"
-    #       read_only: false
-    pi4:
-      nomad_node_class: ingress
-      nomad_reserved_memory: 512
-      nomad_meta:
-        hw_transcode.device: /dev/video11
-        hw_transcode.type: raspberry
-    qnomad.thefij:
-      ansible_host: 192.168.2.234
-      nomad_reserved_memory: 1024
-      # This VM uses a non-standard interface
-      nomad_network_interface: ens3
-nomad_instances:
-  vars:
-    nomad_network_interface: eth0
   children:
-    nomad_servers: {}
-    nomad_clients: {}
-nomad_servers:
-  hosts:
-    nonopi.thefij:
-      ansible_host: 192.168.2.170
-    n1.thefij: {}
-    # n2.thefij: {}
-    pi4: {}
-    # qnomad.thefij: {}
-nomad_clients:
-  hosts:
-    n1.thefij: {}
-    # n2.thefij: {}
-    pi4: {}
-    # qnomad.thefij: {}
+    servers:
+      hosts:
+        n1.thefij:
+          nomad_node_role: both
+          # nomad_meta:
+          #   hw_transcode.device: /dev/dri
+          #   hw_transcode.type: intel
+          nfs_mounts:
+            - src: 10.50.250.2:/srv/volumes
+              path: /srv/volumes/moxy
+              opts: proto=tcp,rw
+          nomad_unique_host_volumes:
+            - name: mysql-data
+              path: /srv/volumes/mysql
+              owner: "999"
+              group: "100"
+              mode: "0755"
+              read_only: false
+            - name: postgres-data
+              path: /srv/volumes/postgres
+              owner: "999"
+              group: "999"
+              mode: "0755"
+              read_only: false
+        n2.thefij:
+          nfs_mounts:
+            - src: 10.50.250.2:/srv/volumes
+              path: /srv/volumes/moxy
+              opts: proto=tcp,rw
+          nomad_node_class: ingress
+          nomad_node_role: both
+          nomad_unique_host_volumes:
+            - name: nextcloud-data
+              path: /srv/volumes/nextcloud
+              owner: "root"
+              group: "bin"
+              mode: "0755"
+              read_only: false
+            - name: gitea-data
+              path: /srv/volumes/gitea
+              owner: "root"
+              group: "bin"
+              mode: "0755"
+              read_only: false
+            - name: sonarr-data
+              path: /srv/volumes/sonarr
+              owner: "root"
+              group: "bin"
+              mode: "0755"
+              read_only: false
+        pi4:
+          nomad_node_role: both
+          nomad_meta:
+            hw_transcode.device: /dev/video11
+            hw_transcode.type: raspberry
+    nomad_instances:
+      children:
+        servers: {}


@@ -14,14 +14,6 @@
         state: restarted
       become: true

-    - name: Start Docker
-      systemd:
-        name: docker
-        state: started
-      become: true
-
     - name: Start Nomad
       systemd:
         name: nomad
-        state: started
+        state: stopped
       become: true


@@ -1,6 +1,6 @@
 ---
 - name: Recover Nomad
-  hosts: nomad_servers
+  hosts: nomad_instances
   any_errors_fatal: true
   tasks:


@@ -14,7 +14,7 @@
       line: "nameserver {{ non_nomad_dns }}"

 - name: Install Docker
-  hosts: nomad_clients
+  hosts: nomad_instances
   become: true
   vars:
     docker_architecture_map:
@@ -44,7 +44,7 @@
       # state: present

 - name: Create NFS mounts
-  hosts: nomad_clients
+  hosts: nomad_instances
   become: true
   vars:
     shared_nfs_mounts:
@@ -56,10 +56,6 @@
         path: /srv/volumes/media-write
         opts: proto=tcp,port=2049,rw
-      - src: 192.168.2.10:/Overflow
-        path: /srv/volumes/nas-overflow
-        opts: proto=tcp,port=2049,rw
       - src: 192.168.2.10:/Photos
         path: /srv/volumes/photos
         opts: proto=tcp,port=2049,rw
@@ -101,12 +97,6 @@
         owner: "root"
         group: "root"
         mode: "0755"
         read_only: false
-      - name: media-overflow-write
-        path: /srv/volumes/nas-overflow/Media
-        owner: "root"
-        group: "root"
-        mode: "0755"
-        read_only: false
       - name: media-downloads
         path: /srv/volumes/media-write/Downloads
         read_only: false
@@ -122,27 +112,12 @@
       - name: nzbget-config
         path: /srv/volumes/nas-container/nzbget
         read_only: false
-      - name: sonarr-config
-        path: /srv/volumes/nas-container/sonarr
-        read_only: false
       - name: lidarr-config
         path: /srv/volumes/nas-container/lidarr
         read_only: false
-      - name: radarr-config
-        path: /srv/volumes/nas-container/radarr
-        read_only: false
       - name: bazarr-config
         path: /srv/volumes/nas-container/bazarr
         read_only: false
-      - name: gitea-data
-        path: /srv/volumes/nas-container/gitea
-        read_only: false
-      - name: ytdl-web
-        path: /srv/volumes/nas-container/ytdl-web
-        read_only: false
-      - name: christmas-community
-        path: /srv/volumes/nas-container/christmas-community
-        read_only: false
       - name: all-volumes
         path: /srv/volumes
         owner: "root"
@@ -153,10 +128,9 @@
   roles:
     - name: ansible-nomad
       vars:
-        nomad_version: "1.9.3-1"
+        nomad_version: "1.6.1-1"
         nomad_install_upgrade: true
        nomad_allow_purge_config: true
-        nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"

         # Where nomad gets installed to
         nomad_bin_dir: /usr/bin
@@ -210,8 +184,7 @@
         nomad_bind_address: 0.0.0.0

         # Default interface for binding tasks
-        # This is now set at the inventory level
-        # nomad_network_interface: eth0
+        nomad_network_interface: eth0

         # Create networks for binding task ports
         nomad_host_networks:
@@ -230,7 +203,7 @@
         enabled: true

 - name: Bootstrap Nomad ACLs and scheduler
-  hosts: nomad_servers
+  hosts: nomad_instances
   tasks:
     - name: Start Nomad
@@ -260,7 +233,6 @@
       run_once: true
       ignore_errors: true
       register: bootstrap_result
-      changed_when: bootstrap_result is succeeded

     - name: Save bootstrap result
       copy:
@@ -292,15 +264,13 @@
           - list
       environment:
         NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      register: policies
       run_once: true
-      changed_when: false
+      register: policies

     - name: Copy policy
      copy:
         src: ../acls/nomad-anon-policy.hcl
         dest: /tmp/anonymous.policy.hcl
-      delegate_to: "{{ play_hosts[0] }}"
       run_once: true
       register: anon_policy
@@ -311,7 +281,7 @@
           - acl
           - policy
           - apply
-          - -description=Anon read only
+          - -description="Anon read only"
           - anonymous
           - /tmp/anonymous.policy.hcl
       environment:
@@ -320,18 +290,6 @@
       delegate_to: "{{ play_hosts[0] }}"
       run_once: true

-    - name: Read scheduler config
-      command:
-        argv:
-          - nomad
-          - operator
-          - scheduler
-          - get-config
-          - -json
-      run_once: true
-      register: scheduler_config
-      changed_when: false
-
     - name: Enable service scheduler preemption
       command:
         argv:
@@ -339,24 +297,12 @@
           - operator
           - scheduler
           - set-config
-          - -preempt-system-scheduler=true
           - -preempt-service-scheduler=true
       environment:
         NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      delegate_to: "{{ play_hosts[0] }}"
       run_once: true
-      when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
-
-    - name: Enable system scheduler preemption
-      command:
-        argv:
-          - nomad
-          - operator
-          - scheduler
-          - set-config
-          - -preempt-system-scheduler=true
-      environment:
-        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      run_once: true
-      when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false

     # - name: Set up Nomad backend and roles in Vault
     #   community.general.terraform:


@@ -9,6 +9,8 @@ nomad/jobs/authelia:
   db_user: VALUE
   email_sender: VALUE
   jwt_secret: VALUE
+  lldap_admin_password: VALUE
+  lldap_admin_user: VALUE
   oidc_clients: VALUE
   oidc_hmac_secret: VALUE
   oidc_issuer_certificate_chain: VALUE
@@ -22,29 +24,21 @@ nomad/jobs/backup:
   nas_ftp_host: VALUE
   nas_ftp_pass: VALUE
   nas_ftp_user: VALUE
-  nas_minio_access_key_id: VALUE
-  nas_minio_secret_access_key: VALUE
 nomad/jobs/backup-oneoff-n1:
   backup_passphrase: VALUE
   nas_ftp_host: VALUE
   nas_ftp_pass: VALUE
   nas_ftp_user: VALUE
-  nas_minio_access_key_id: VALUE
-  nas_minio_secret_access_key: VALUE
 nomad/jobs/backup-oneoff-n2:
   backup_passphrase: VALUE
   nas_ftp_host: VALUE
   nas_ftp_pass: VALUE
   nas_ftp_user: VALUE
-  nas_minio_access_key_id: VALUE
-  nas_minio_secret_access_key: VALUE
 nomad/jobs/backup-oneoff-pi4:
   backup_passphrase: VALUE
   nas_ftp_host: VALUE
   nas_ftp_pass: VALUE
   nas_ftp_user: VALUE
-  nas_minio_access_key_id: VALUE
-  nas_minio_secret_access_key: VALUE
 nomad/jobs/bazarr:
   db_name: VALUE
   db_pass: VALUE
@@ -67,7 +61,6 @@ nomad/jobs/git:
   db_name: VALUE
   db_pass: VALUE
   db_user: VALUE
-  oidc_secret: VALUE
   secret_key: VALUE
   smtp_sender: VALUE
 nomad/jobs/grafana:
@@ -90,15 +83,17 @@ nomad/jobs/immich:
   db_name: VALUE
   db_pass: VALUE
   db_user: VALUE
-nomad/jobs/lego:
-  acme_email: VALUE
-  domain_lego_dns: VALUE
-  usersfile: VALUE
+nomad/jobs/ipdvr/radarr:
+  db_pass: VALUE
+  db_user: VALUE
 nomad/jobs/lidarr:
   db_name: VALUE
   db_pass: VALUE
   db_user: VALUE
 nomad/jobs/lldap:
+  admin_email: VALUE
+  admin_password: VALUE
+  admin_user: VALUE
   db_name: VALUE
   db_pass: VALUE
   db_user: VALUE
@@ -116,38 +111,21 @@ nomad/jobs/photoprism:
   db_name: VALUE
   db_pass: VALUE
   db_user: VALUE
-  oidc_secret: VALUE
 nomad/jobs/postgres-server:
   superuser: VALUE
   superuser_pass: VALUE
-nomad/jobs/radarr:
-  db_name: VALUE
-  db_pass: VALUE
-  db_user: VALUE
 nomad/jobs/redis-authelia:
   allowed_psks: VALUE
 nomad/jobs/redis-blocky:
   allowed_psks: VALUE
 nomad/jobs/rediscommander:
   redis_stunnel_psk: VALUE
-nomad/jobs/sonarr:
-  db_name: VALUE
-  db_pass: VALUE
-  db_user: VALUE
 nomad/jobs/traefik:
-  external: VALUE
+  acme_email: VALUE
+  domain_lego_dns: VALUE
   usersfile: VALUE
-nomad/jobs/unifi-traffic-route-ips:
-  unifi_password: VALUE
-  unifi_username: VALUE
-nomad/jobs/wishlist:
-  guest_password: VALUE
 nomad/oidc:
   secret: VALUE
-secrets/ldap:
-  admin_email: VALUE
-  admin_password: VALUE
-  admin_user: VALUE
 secrets/mysql:
   mysql_root_password: VALUE
 secrets/postgres:


@@ -44,11 +44,6 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
       source    = "all-volumes"
     }

-    ephemeral_disk {
-      # Try to keep restic cache intact
-      sticky = true
-    }
-
     service {
       name     = "backup"
       provider = "nomad"
@@ -62,8 +57,6 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
     task "backup" {
       driver = "docker"

-      shutdown_delay = "5m"
-
       volume_mount {
         volume      = "all-volumes"
         destination = "/data"
@@ -71,53 +64,26 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
       }

       config {
-        image = "iamthefij/restic-scheduler:0.4.2"
+        image = "iamthefij/resticscheduler:0.2.0"
         ports = ["metrics"]
         args  = [
-          "--push-gateway",
-          "http://pushgateway.nomad:9091",
 %{ if batch_node != null ~}
           "-once",
           "-$${NOMAD_META_task}",
           "$${NOMAD_META_job_name}",
           "--snapshot",
           "$${NOMAD_META_snapshot}",
+          "--push-gateway",
+          "http://pushgateway.nomad:9091",
 %{ endif ~}
           "$${NOMAD_TASK_DIR}/node-jobs.hcl",
         ]
       }

-      action "unlockenv" {
-        command = "sh"
-        args    = ["-c", "/bin/restic-scheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
-      }
-
-      action "unlocktmpl" {
-        command = "/bin/restic-scheduler"
-        args    = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
-      }
-
-      action "unlockhc" {
-        command = "/bin/restic-scheduler"
-        args    = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
-      }
-
-      action "backupall" {
-        command = "/bin/restic-scheduler"
-        args    = ["-once", "-backup", "all", "/local/node-jobs.hcl"]
-      }
-
-      action "backupallenv" {
-        command = "sh"
-        args    = ["-c", "/bin/restic-scheduler -once -backup all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
-      }
-
       env = {
-        RCLONE_CHECKERS        = "2"
-        RCLONE_TRANSFERS       = "2"
-        RCLONE_FTP_CONCURRENCY = "5"
-        RESTIC_CACHE_DIR       = "$${NOMAD_ALLOC_DIR}/data"
-        TZ                     = "America/Los_Angeles"
+        "RCLONE_CHECKERS"        = "2"
+        "RCLONE_TRANSFERS"       = "2"
+        "RCLONE_FTP_CONCURRENCY" = "5"
       }
@@ -141,14 +107,13 @@ RCLONE_FTP_USER={{ .nas_ftp_user }}
 RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
 RCLONE_FTP_EXPLICIT_TLS=true
 RCLONE_FTP_NO_CHECK_CERTIFICATE=true
-AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
-AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
 {{ end -}}
 EOF
         destination = "secrets/db.env"
         env         = true
       }

       template {
         # Build jobs based on node
         data = <<EOF
@@ -163,31 +128,13 @@ ${file("${module_path}/${job_file}")}
 {{ end -}}
 {{ end -}}
 %{ endfor ~}
-
-# Dummy job to keep task healthy on node without any stateful services
-job "Dummy" {
-  schedule = "@daily"
-
-  config {
-    repo       = "/local/dummy-repo"
-    passphrase = env("BACKUP_PASSPHRASE")
-  }
-
-  backup {
-    paths = ["/local/node-jobs.hcl"]
-  }
-
-  forget {
-    KeepLast = 1
-  }
-}
 EOF
         destination = "local/node-jobs.hcl"
       }

       resources {
         cpu    = 50
-        memory = 500
+        memory = 256
       }
     }
@@ -200,8 +147,8 @@ job "Dummy" {
       }

       config {
-        image = "iamthefij/stunnel:1.0.0"
-        args  = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
+        image = "alpine:3.17"
+        args  = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
       }
@@ -209,6 +156,15 @@ job "Dummy" {
         memory = 100
       }

+      template {
+        data        = <<EOF
+set -e
+apk add stunnel
+exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
+EOF
+        destination = "$${NOMAD_TASK_DIR}/start.sh"
+      }
+
       template {
         data = <<EOF
 syslog = no


@@ -8,7 +8,7 @@ resource "nomad_job" "backup" {
 resource "nomad_job" "backup-oneoff" {
   # TODO: Get list of nomad hosts dynamically
-  for_each = toset(["n1", "pi4"])
+  for_each = toset(["n1", "n2", "pi4"])
   # for_each = toset([
   #   for node in data.consul_service.nomad.service :
   #   node.node_name
@@ -22,9 +22,7 @@ resource "nomad_job" "backup-oneoff" {
 }

 locals {
-  # NOTE: This can't be dynamic in first deploy since these values are not known
-  # all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
-  all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"])
+  all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
 }

 resource "nomad_acl_policy" "secrets_mysql" {


@@ -2,12 +2,8 @@ job "authelia" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
+    repo       = "rclone::ftp,env_auth:/nomad/authelia"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Create local authelia dir" {


@@ -1,57 +0,0 @@
-job "git" {
-  schedule = "@daily"
-
-  config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
-    passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
-  }
-
-  task "Create local gitea dir" {
-    pre_script {
-      on_backup = "mkdir -p /local/gitea"
-    }
-  }
-
-  task "Backup database" {
-    mysql "Backup database" {
-      hostname       = env("MYSQL_HOST")
-      port           = env("MYSQL_PORT")
-      database       = "gitea"
-      username       = env("MYSQL_USER")
-      password       = env("MYSQL_PASSWORD")
-      no_tablespaces = true
-      dump_to        = "/local/gitea/dump.sql"
-    }
-  }
-
-  backup {
-    paths = [
-      "/local/gitea",
-      "/data/nas-container/gitea",
-    ]
-
-    backup_opts {
-      Host = "nomad"
-    }
-
-    restore_opts {
-      Host = ["nomad"]
-      # Because path is absolute
-      Target = "/"
-    }
-  }
-
-  forget {
-    KeepLast    = 2
-    KeepHourly  = 24
-    KeepDaily   = 30
-    KeepWeekly  = 8
-    KeepMonthly = 6
-    KeepYearly  = 2
-    Prune       = true
-  }
-}


@@ -2,12 +2,8 @@ job "grafana" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana"
+    repo       = "rclone::ftp,env_auth:/nomad/grafana"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Create local grafana dir" {


@@ -2,12 +2,8 @@ job "lidarr" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
+    repo       = "rclone::ftp,env_auth:/nomad/lidarr"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Backup main database" {
@@ -38,11 +34,7 @@ job "lidarr" {
     paths = ["/data/nas-container/lidarr"]

     backup_opts {
-      Exclude = [
-        "lidarr_backup_*.zip",
-        "/data/nas-container/lidarr/MediaCover",
-        "/data/nas-container/lidarr/logs",
-      ]
+      Exclude = ["lidarr_backup_*.zip"]
       Host    = "nomad"
     }


@@ -2,12 +2,8 @@ job "lldap" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
+    repo       = "rclone::ftp,env_auth:/nomad/lldap"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Create local backup dir" {


@@ -2,12 +2,8 @@ job "nzbget" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
+    repo       = "rclone::ftp,env_auth:/nomad/nzbget"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   backup {


@@ -2,12 +2,8 @@ job "photoprism" {
   schedule = "10 * * * *"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
+    repo       = "rclone::ftp,env_auth:/nomad/photoprism"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Create local photoprism dir" {
@@ -36,9 +32,6 @@ job "photoprism" {
     backup_opts {
       Host = "nomad"
-      Exclude = [
-        "/data/nas-container/photoprism/cache",
-      ]
     }

     restore_opts {


@@ -1,64 +0,0 @@
-job "radarr" {
-  schedule = "@daily"
-
-  config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
-    passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
-  }
-
-  task "Backup main database" {
-    postgres "Backup database" {
-      hostname       = env("POSTGRES_HOST")
-      port           = env("POSTGRES_PORT")
-      username       = env("POSTGRES_USER")
-      password       = env("POSTGRES_PASSWORD")
-      database       = "radarr"
-      no_tablespaces = true
-      dump_to        = "/data/nas-container/radarr/Backups/dump-radarr.sql"
-    }
-  }
-
-  task "Backup logs database" {
-    postgres "Backup database" {
-      hostname       = env("POSTGRES_HOST")
-      port           = env("POSTGRES_PORT")
-      username       = env("POSTGRES_USER")
-      password       = env("POSTGRES_PASSWORD")
-      database       = "radarr-logs"
-      no_tablespaces = true
-      dump_to        = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
-    }
-  }
-
-  backup {
-    paths = ["/data/nas-container/radarr"]
-
-    backup_opts {
-      Exclude = [
-        "radarr_backup_*.zip",
-        "/data/nas-container/radarr/MediaCover",
-        "/data/nas-container/radarr/logs",
-      ]
-      Host = "nomad"
-    }
-
-    restore_opts {
-      Host = ["nomad"]
-      # Because path is absolute
-      Target = "/"
-    }
-  }
-
-  forget {
-    KeepLast    = 2
-    KeepDaily   = 30
-    KeepWeekly  = 8
-    KeepMonthly = 6
-    KeepYearly  = 2
-    Prune       = true
-  }
-}


@@ -2,12 +2,8 @@ job "sabnzbd" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
+    repo       = "rclone::ftp,env_auth:/nomad/sabnzbd"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   backup {


@@ -2,46 +2,30 @@ job "sonarr" {
   schedule = "@daily"

   config {
-    repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
+    repo       = "rclone::ftp,env_auth:/nomad/sonarr"
     passphrase = env("BACKUP_PASSPHRASE")
-
-    options {
-      InsecureTls = true
-    }
   }

   task "Backup main database" {
-    postgres "Backup database" {
-      hostname       = env("POSTGRES_HOST")
-      port           = env("POSTGRES_PORT")
-      username       = env("POSTGRES_USER")
-      password       = env("POSTGRES_PASSWORD")
-      database       = "sonarr"
-      no_tablespaces = true
-      dump_to        = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
+    sqlite "Backup database" {
+      path    = "/data/sonarr/sonarr.db"
+      dump_to = "/data/sonarr/Backups/sonarr.db.bak"
     }
   }

   task "Backup logs database" {
-    postgres "Backup database" {
-      hostname       = env("POSTGRES_HOST")
-      port           = env("POSTGRES_PORT")
-      username       = env("POSTGRES_USER")
-      password       = env("POSTGRES_PASSWORD")
-      database       = "sonarr-logs"
-      no_tablespaces = true
-      dump_to        = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
+    sqlite "Backup database" {
+      path    = "/data/sonarr/logs.db"
+      dump_to = "/data/sonarr/Backups/logs.db.bak"
     }
   }

   backup {
-    paths = ["/data/nas-container/sonarr"]
+    paths = ["/data/sonarr"]

     backup_opts {
       Exclude = [
         "sonarr_backup_*.zip",
-        "/data/nas-container/sonarr/MediaCover",
-        "/data/nas-container/sonarr/logs",
         "*.db",
         "*.db-shm",
         "*.db-wal",


@@ -2,39 +2,39 @@
 # Manual edits may be lost in future updates.

 provider "registry.terraform.io/hashicorp/nomad" {
-  version = "2.1.1"
+  version = "2.0.0"
   hashes = [
-    "h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
-    "zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
-    "zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
-    "zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
-    "zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
-    "zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
-    "zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
+    "h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
+    "zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
+    "zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
+    "zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
-    "zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
-    "zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
-    "zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
-    "zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
+    "zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
+    "zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
+    "zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
+    "zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
+    "zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
+    "zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
+    "zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
+    "zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
   ]
 }

 provider "registry.terraform.io/hashicorp/random" {
-  version = "3.6.0"
+  version = "3.5.1"
   hashes = [
-    "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
-    "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
-    "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
-    "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
-    "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
-    "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
+    "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
+    "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
+    "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
+    "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
+    "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
+    "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
-    "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
-    "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
-    "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
-    "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
-    "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
+    "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
+    "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
+    "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
+    "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
+    "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
+    "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
   ]
 }


@@ -4,7 +4,7 @@ module "authelia" {
   name           = "authelia"
   instance_count = 2
   priority       = 70
-  image          = "authelia/authelia:4.38"
+  image          = "authelia/authelia:4.37"
   args           = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
   ingress        = true
   service_port   = 9999
@@ -49,7 +49,7 @@ module "authelia" {
       mount = false
     },
     {
-      data        = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
+      data        = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .lldap_admin_password }}{{ end }}"
       dest_prefix = "$${NOMAD_SECRETS_DIR}"
       dest        = "ldap_password.txt"
       mount       = false
@@ -105,62 +105,6 @@ module "authelia" {
   ]
 }

-resource "nomad_acl_policy" "authelia" {
-  name        = "authelia"
-  description = "Give access to shared authelia variables"
-  rules_hcl   = <<EOH
-namespace "default" {
-  variables {
-    path "authelia/*" {
-      capabilities = ["read"]
-    }
-
-    path "secrets/authelia/*" {
-      capabilities = ["read"]
-    }
-  }
-}
-EOH
-
-  job_acl {
-    job_id = module.authelia.job_id
-  }
-}
-
-# Give access to ldap secrets
-resource "nomad_acl_policy" "authelia_ldap_secrets" {
-  name        = "authelia-secrets-ldap"
-  description = "Give access to LDAP secrets"
-  rules_hcl   = <<EOH
-namespace "default" {
-  variables {
-    path "secrets/ldap" {
-      capabilities = ["read"]
-    }
-  }
-}
-EOH
-
-  job_acl {
-    job_id = module.authelia.job_id
-  }
-}
-
-# Enable oidc for nomad clients
-module "nomad_oidc_client" {
-  source = "./oidc_client"
-  name   = "nomad"
-
-  oidc_client_config = {
-    description          = "Nomad"
-    authorization_policy = "two_factor"
-    redirect_uris = [
-      "https://nomad.${var.base_hostname}/oidc/callback",
-      "https://nomad.${var.base_hostname}/ui/settings/tokens",
-    ]
-    scopes = ["openid", "groups"]
-  }
-}
-
 resource "nomad_acl_auth_method" "nomad_authelia" {
   name = "authelia"
   type = "OIDC"
@@ -170,9 +114,9 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
   config {
     oidc_discovery_url = "https://authelia.${var.base_hostname}"
-    oidc_client_id     = module.nomad_oidc_client.client_id
-    oidc_client_secret = module.nomad_oidc_client.secret
-    bound_audiences    = [module.nomad_oidc_client.client_id]
+    oidc_client_id     = "nomad"
+    oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
+    bound_audiences    = ["nomad"]
     oidc_scopes = [
       "groups",
       "openid",
@@ -190,7 +134,7 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
 resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
   description = "engineering rule"
   auth_method = nomad_acl_auth_method.nomad_authelia.name
-  selector    = "\"nomad-admin\" in list.roles"
+  selector    = "\"nomad-deploy\" in list.roles"
   bind_type   = "role"
   bind_name   = "admin" # acls.nomad_acl_role.admin.name
 }


@@ -89,8 +89,8 @@ authentication_backend:
     groups_filter: (member={dn})

     ## The username and password of the admin user.
-    {{ with nomadVar "secrets/ldap" }}
-    user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
+    {{ with nomadVar "nomad/jobs/authelia" }}
+    user: uid={{ .lldap_admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
     {{ end }}
     # password set using secrets file
     # password: <secret>
@@ -151,22 +151,6 @@ access_control:
       networks: 192.168.5.0/24

   rules:
-    ## Allow favicons on internal network
-    - domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
-      resources:
-        - '^/apple-touch-icon-precomposed\.png$'
-        - '^/assets/safari-pinned-tab\.svg$'
-        - '^/apple-touch-icon-180x180\.png$'
-        - '^/apple-touch-icon\.png$'
-        - '^/favicon\.ico$'
-      networks:
-        - internal
-      policy: bypass
-
-{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
-    - domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
-{{ .rule.Value | indent 6 }}
-    {{ end }}{{ end }}
-
     ## Rules applied to everyone
     - domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
       networks:
@@ -235,7 +219,7 @@ storage:
 ## The available providers are: filesystem, smtp. You must use only one of these providers.
 notifier:
   ## You can disable the notifier startup check by setting this to true.
-  disable_startup_check: true
+  disable_startup_check: false

 {{ with nomadVar "secrets/smtp" }}
   smtp:
@@ -261,18 +245,4 @@ identity_providers:
     #   hmac_secret: <file>
     #   issuer_private_key: <file>

-    clients:
-{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
-{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
-{{ "-" | indent 6 }}
-{{ with nomadVar .Path }}
-{{- $im := .ItemsMap -}}
-{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
-{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
-{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
-{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
-{{- end -}}
-{{ $im | toYAML | indent 8 }}
-{{ end }}
-{{ end }}
+    clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }}


@@ -1,23 +1,20 @@
+variable "config_data" {
+  type        = string
+  description = "Plain text config file for blocky"
+}
+
 job "blocky" {
   datacenters = ["dc1"]
-  type        = "service"
+  type        = "system"
   priority    = 100

-  constraint {
-    distinct_hosts = true
-  }
-
   update {
     max_parallel     = 1
-    auto_revert      = true
-    min_healthy_time = "60s"
-    healthy_deadline = "5m"
+    # TODO: maybe switch to service job from system so we can use canary and autorollback
+    # auto_revert = true
   }

   group "blocky" {
-    # TODO: This must be updated to match the nubmer of servers (possibly grabbed from TF)
-    # I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
-    count = 2
-
     network {
       mode = "bridge"
@@ -35,9 +32,7 @@ job "blocky" {
       dns {
         # Set expclicit DNS servers because tasks, by default, use this task
-        servers = [
-          "192.168.2.1",
-        ]
+        servers = ["1.1.1.1", "1.0.0.1"]
       }
     }
@@ -65,11 +60,6 @@ job "blocky" {
         path     = "/"
         interval = "10s"
         timeout  = "3s"
-
-        check_restart {
-          limit = 3
-          grace = "5m"
-        }
       }
     }
@@ -77,31 +67,19 @@ job "blocky" {
       driver = "docker"

       config {
-        image = "ghcr.io/0xerr0r/blocky:v0.24"
+        image = "ghcr.io/0xerr0r/blocky:v0.22"
         args  = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
         ports = ["dns", "api"]
       }

-      action "refresh-lists" {
-        command = "/app/blocky"
-        args    = ["lists", "refresh"]
-      }
-
-      action "healthcheck" {
-        command = "/app/blocky"
-        args    = ["healthcheck"]
-      }
-
       resources {
         cpu        = 50
-        memory     = 75
-        memory_max = 150
+        memory     = 50
+        memory_max = 100
       }

       template {
-        data = <<EOF
-${file("${module_path}/config.yml")}
-EOF
+        data = var.config_data

         destination = "$${NOMAD_TASK_DIR}/config.yml"
         splay       = "1m"
@@ -127,121 +105,6 @@ EOF
           max = "20s"
         }
       }
-
-      template {
-        data        = <<EOF
-{{ if nomadVarExists "blocky_lists/user" }}
-{{ with nomadVar "blocky_lists/user" -}}
-{{ .block_list.Value }}
-{{- end }}
-{{- end }}
-EOF
-        destination = "$${NOMAD_TASK_DIR}/block"
-        change_mode = "script"
-
-        change_script {
-          command = "/app/blocky"
-          args    = ["lists", "refresh"]
-          timeout = "20s"
-        }
-
-        wait {
-          min = "30s"
-          max = "1m"
-        }
-      }
-
-      template {
-        data        = <<EOF
-{{ if nomadVarExists "blocky_lists/user" }}
-{{ with nomadVar "blocky_lists/user" -}}
-{{ .allow_list.Value }}
-{{- end }}
-{{- end }}
-EOF
-        destination = "$${NOMAD_TASK_DIR}/allow"
-        change_mode = "script"
-
-        change_script {
-          command = "/app/blocky"
-          args    = ["lists", "refresh"]
-          timeout = "20s"
-        }
-
-        wait {
-          min = "30s"
-          max = "1m"
-        }
-      }
-
-      template {
-        data        = <<EOF
-{{ if nomadVarExists "blocky_lists/terraform" }}
-{{ with nomadVar "blocky_lists/terraform" -}}
-{{ .smarttv_regex.Value }}
-{{- end }}
-{{- end }}
-EOF
-        destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
-        change_mode = "script"
-
-        change_script {
-          command = "/app/blocky"
-          args    = ["lists", "refresh"]
-          timeout = "20s"
-        }
-
-        wait {
-          min = "10s"
-          max = "20s"
-        }
-      }
-
-      template {
-        data        = <<EOF
-{{ if nomadVarExists "blocky_lists/terraform" }}
-{{ with nomadVar "blocky_lists/terraform" -}}
-{{ .wemo.Value }}
-{{- end }}
-{{- end }}
-EOF
-        destination = "$${NOMAD_TASK_DIR}/wemo.txt"
-        change_mode = "script"
-
-        change_script {
-          command = "/app/blocky"
-          args    = ["lists", "refresh"]
-          timeout = "20s"
-        }
-
-        wait {
-          min = "10s"
-          max = "20s"
-        }
-      }
-
-      template {
-        data        = <<EOF
-{{ if nomadVarExists "blocky_lists/terraform" }}
-{{ with nomadVar "blocky_lists/terraform" -}}
-{{ .sonos.Value }}
-{{- end }}
-{{- end }}
-EOF
-        destination = "$${NOMAD_TASK_DIR}/sonos.txt"
-        change_mode = "script"
-
-        change_script {
-          command = "/app/blocky"
-          args    = ["lists", "refresh"]
-          timeout = "20s"
-        }
-
-        wait {
-          min = "10s"
-          max = "20s"
-        }
-      }
     }

     task "stunnel" {
@@ -253,9 +116,9 @@ EOF
       }

       config {
-        image = "iamthefij/stunnel:1.0.0"
-        args  = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
+        image = "alpine:3.17"
         ports = ["tls"]
+        args  = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
       }
@@ -263,34 +126,36 @@ EOF
         memory = 100
       }

+      template {
+        data        = <<EOF
+set -e
+apk add stunnel
+exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
+EOF
+        destination = "$${NOMAD_TASK_DIR}/start.sh"
+      }
+
       template {
         data = <<EOF
 syslog = no
 foreground = yes
 delay = yes

-[dns_server]
-# Dummy server to keep stunnel running if no mysql is present
-accept = 8053
-connect = 127.0.0.1:53
-ciphers = PSK
-PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
-
-{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
 [mysql_client]
 client = yes
 accept = 127.0.0.1:3306
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
 connect = {{ .Address }}:{{ .Port }}
+PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
 {{- end }}
-PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt

-{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
 [redis_client]
 client = yes
 accept = 127.0.0.1:6379
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
 connect = {{ .Address }}:{{ .Port }}
+PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
 {{- end }}
-PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
 EOF
         destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
       }
@@ -321,9 +186,11 @@ EOF
       config {
         image = "mariadb:10"
         args = [
+          "/usr/bin/timeout",
+          "2m",
           "/bin/bash",
           "-c",
-          "/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true",
+          "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
         ]
       }


@@ -1,7 +1,16 @@
+locals {
+  config_data = file("${path.module}/config.yml")
+}
+
 resource "nomad_job" "blocky" {
+  hcl2 {
+    vars = {
+      "config_data" = local.config_data,
+    }
+  }
+
   jobspec = templatefile("${path.module}/blocky.nomad", {
     use_wesher  = var.use_wesher,
-    module_path = path.module,
   })
 }
@@ -57,32 +66,3 @@ EOH
     task  = "stunnel"
   }
 }
-
-resource "nomad_variable" "blocky_lists_terraform" {
-  path = "blocky_lists/terraform"
-  items = {
-    smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
-    wemo          = file("${path.module}/list-wemo.txt")
-    sonos         = file("${path.module}/list-sonos.txt")
-  }
-}
-
-resource "nomad_acl_policy" "blocky_lists" {
-  name        = "blocky-lists"
-  description = "Give access Blocky lists"
-  rules_hcl   = <<EOH
-namespace "default" {
-  variables {
-    path "blocky_lists/*" {
-      capabilities = ["read"]
-    }
-  }
-}
-EOH
-
-  job_acl {
-    job_id = "blocky"
-    group  = "blocky"
-    task   = "blocky"
-  }
-}


@@ -2,53 +2,28 @@ ports:
   dns: 53
   http: 4000

-# I must have ip v6 blocked or something
-connectIPVersion: v4
-
 bootstrapDns:
   - upstream: 1.1.1.1
   - upstream: 1.0.0.1
-  - upstream: 9.9.9.9
-  - upstream: 149.112.112.112

-upstreams:
-  init:
-    strategy: fast
-  groups:
-    default:
-      - https://dns.quad9.net/dns-query
-      - tcp-tls:dns.quad9.net
-      - https://one.one.one.one/dns-query
-      - tcp-tls:one.one.one.one
-      # cloudflare:
-      #   - 1.1.1.1
-      #   - 1.0.0.1
-      #   - 2606:4700:4700::1111
-      #   - 2606:4700:4700::1001
-      #   - https://one.one.one.one/dns-query
-      #   - tcp-tls:one.one.one.one
-      # quad9:
-      #   - 9.9.9.9
-      #   - 149.112.112.112
-      #   - 2620:fe::fe
-      #   - 2620:fe::9
-      #   - https://dns.quad9.net/dns-query
-      #   - tcp-tls:dns.quad9.net
-      # quad9-secured:
-      #   - 9.9.9.11
-      #   - 149.112.112.11
-      #   - 2620:fe::11
-      #   - 2620:fe::fe:11
-      #   - https://dns11.quad9.net/dns-query
-      #   - tcp-tls:dns11.quad9.net
-      # quad9-unsecured:
-      #   - 9.9.9.10
-      #   - 149.112.112.10
-      #   - 2620:fe::10
-      #   - 2620:fe::fe:10
-      #   - https://dns10.quad9.net/dns-query
-      #   - tcp-tls:dns10.quad9.net
+upstream:
+  default:
+    - 1.1.1.1
+    - 1.0.0.1
+  quad9:
+    - 9.9.9.9
+    - 149.112.112.112
+    - 2620:fe::fe
+    - 2620:fe::9
+    - https://dns.quad9.net/dns-query
+    - tcp-tls:dns.quad9.net
+  quad9-unsecured:
+    - 9.9.9.10
+    - 149.112.112.10
+    - 2620:fe::10
+    - 2620:fe::fe:10
+    - https://dns10.quad9.net/dns-query
+    - tcp-tls:dns10.quad9.net

 conditional:
   fallbackUpstream: false
@@ -61,11 +36,9 @@ conditional:
     .: 192.168.2.1

 hostsFile:
-  sources:
-    - {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
+  filePath: {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
   hostsTTL: 30s
-  loading:
-    refreshPeriod: 30s
+  refreshPeriod: 30s

 clientLookup:
   upstream: 192.168.2.1
@@ -77,12 +50,22 @@ blocking:
     - http://sysctl.org/cameleon/hosts
     - https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
     - https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
-    # - https://hosts-file.net/ad_servers.txt
-  iot:
-    - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
-    - {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt
-    - {{ env "NOMAD_TASK_DIR" }}/wemo.txt
-    - {{ env "NOMAD_TASK_DIR" }}/sonos.txt
+    - https://hosts-file.net/ad_servers.txt
+  smarttv:
+    - https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
+    - https://perflyst.github.io/PiHoleBlocklist/regex.list
+  wemo:
+    - |
+      # Remote commands
+      api.xbcs.net
+      # Firmware updates
+      fw.xbcs.net
+      # TURN service
+      nat.wemo2.com
+      # Connectivity checks
+      heartbeat.xwemo.com
+  malware:
+    - https://mirror1.malwaredomains.com/files/justdomains
   antisocial:
     - |
       facebook.com
@@ -90,21 +73,20 @@ blocking:
       reddit.com
       twitter.com
       youtube.com
-  custom:
-    - {{ env "NOMAD_TASK_DIR" }}/block

 whiteLists:
-  custom:
-    - {{ env "NOMAD_TASK_DIR" }}/allow
+  # Move to Gitea when deployed internally
+  ads:
+{{ with nomadVar "nomad/jobs/blocky" -}}
+{{ .whitelists_ads.Value | indent 6 }}
+{{- end }}

 clientGroupsBlock:
   default:
     - ads
-    - custom
-  192.168.3.1/24:
-    - ads
-    - iot
-    - custom
+    - malware
+    - smarttv
+    - wemo

 customDNS:
   customTTL: 1h
@@ -123,7 +105,7 @@ customDNS:
 prometheus:
   enable: true

-{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-tls" -}}
 redis:
   address: 127.0.0.1:6379
   # password: ""
@@ -132,6 +114,7 @@ redis:
   connectionCooldown: 3s
 {{ end -}}
+
 {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
 {{ with nomadVar "nomad/jobs/blocky" -}}
 queryLog:


@@ -1,13 +0,0 @@
-# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
-# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
-# Version: 13July2023v1
-# Samsung
-/(^|\.)giraffic\.com$/
-/(^|\.)internetat\.tv$/
-/(^|\.)pavv\.co\.kr$/
-/(^|\.)samsungcloudsolution\.net$/
-/(^|\.)samsungelectronics\.com$/
-/(^|\.)samsungrm\.net$/
-# /(^|\.)samsungotn\.net$/ # prevents updates
-# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
-# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection


@@ -1,2 +0,0 @@
-# Block Sonos devices from phoning home and allowing remote access
-/(^|\.)sonos\.com$/


@@ -1,8 +0,0 @@
-# Remote commands
-api.xbcs.net
-# Firmware updates
-fw.xbcs.net
-# TURN service
-nat.wemo2.com
-# Connectivity checks
-heartbeat.xwemo.com


@@ -1,16 +1,8 @@
 job "exporters" {
   datacenters = ["dc1"]
-  type        = "service"
-  priority    = 55
-
-  constraint {
-    distinct_hosts = true
-  }
+  type        = "system"

   group "promtail" {
-    # TODO: This must be updated to match the nubmer of servers (possibly grabbed from TF)
-    # I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/1202
-    count = 2
-
     network {
       mode = "bridge"
@@ -41,8 +33,14 @@ job "exporters" {
     task "promtail" {
       driver = "docker"

+      meta = {
+        "diun.sort_tags"    = "semver"
+        "diun.watch_repo"   = true
+        "diun.include_tags" = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
+      }
+
       config {
-        image = "grafana/promtail:3.3.0"
+        image = "grafana/promtail:2.9.1"
         args  = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"]
         ports = ["promtail"]


@@ -1,5 +0,0 @@
-resource "nomad_job" "exporters" {
-  jobspec = templatefile("${path.module}/exporters.nomad", {
-    use_wesher = var.use_wesher,
-  })
-}


@@ -28,6 +28,7 @@ job "grafana" {
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.grafana.entryPoints=websecure",
+        # "traefik.http.routers.grafana.middlewares=authelia@nomad",
       ]
     }
@@ -40,8 +41,8 @@ job "grafana" {
       }

       config {
-        image = "iamthefij/stunnel:1.0.0"
-        args  = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
+        image = "alpine:3.17"
+        args  = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
       }
@@ -49,6 +50,15 @@ job "grafana" {
         memory = 100
       }

+      template {
+        data        = <<EOF
+set -e
+apk add stunnel
+exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
+EOF
+        destination = "$${NOMAD_TASK_DIR}/start.sh"
+      }
+
       template {
         data = <<EOF
 syslog = no
@@ -86,10 +96,10 @@ EOF
         image = "mariadb:10"
         args = [
           "/usr/bin/timeout",
-          "20m",
+          "2m",
           "/bin/bash",
           "-c",
-          "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
+          "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
         ]
       }
@@ -133,15 +143,14 @@ SELECT 'NOOP';
       driver = "docker"

       config {
-        image = "grafana/grafana:10.0.10"
+        image = "grafana/grafana:9.4.2"
         args  = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
         ports = ["web"]
       }

       env = {
         "GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
-        "GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
-        "GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
+        "GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning"
       }

       template {
@@ -155,6 +164,7 @@ GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
 GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
 GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
 GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
+GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
 {{ if .db_name -}}
 # Database storage
 GF_DATABASE_TYPE=mysql
@@ -166,10 +176,6 @@ GF_DATABASE_PASSWORD={{ .db_pass }}
 SLACK_BOT_URL={{ .slack_bot_url }}
 SLACK_BOT_TOKEN={{ .slack_bot_token }}
 SLACK_HOOK_URL={{ .slack_hook_url }}
-{{ end -}}
-{{ with nomadVar "secrets/authelia/grafana" -}}
-GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
-GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
 {{ end -}}
 EOF
         env = true
@@ -196,12 +202,8 @@ GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
       }

       resources {
-        cpu    = 50
-        memory = 50
-      }
-
-      action "reloadnow" {
-        command = "/local/reload_config.sh"
+        cpu    = 100
+        memory = 100
       }

       env = {
@@ -263,7 +265,7 @@ ${file(join("/", [module_path, "grafana", config_file]))}
         # Set owner to grafana uid
         # uid = 472
         # Change template delimeter for dashboard files that use json and have double curly braces and square braces
-%{ if endswith(config_file, ".json") ~}
+%{ if length(regexall("dashboard", config_file)) > 0 ~}
         left_delimiter  = "<<<<"
         right_delimiter = ">>>>"
 %{ endif }
@@ -279,11 +281,6 @@ ${file(join("/", [module_path, "grafana", config_file]))}
     task "grafana-image-renderer" {
       driver = "docker"

-      constraint {
-        attribute = "$${attr.cpu.arch}"
-        value     = "amd64"
-      }
-
       config {
         image = "grafana/grafana-image-renderer:3.6.1"
         ports = ["renderer"]

View File

@ -20,8 +20,8 @@ data = /var/lib/grafana
# Directory where grafana will automatically scan and look for plugins # Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins ;plugins = /var/lib/grafana/plugins
# folder that contains PROVISIONING config files that grafana will apply on startup and while running. # folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = from_env ; provisioning = /etc/grafana/provisioning
#################################### Server #################################### #################################### Server ####################################
[server] [server]
@ -261,7 +261,7 @@ log_queries =
enabled = true enabled = true
name = Authelia name = Authelia
;allow_sign_up = true ;allow_sign_up = true
client_id = from_env client_id = grafana
client_secret = from_env client_secret = from_env
scopes = openid profile email groups scopes = openid profile email groups
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization
@ -270,10 +270,6 @@ api_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{
login_attribute_path = preferred_username login_attribute_path = preferred_username
groups_attribute_path = groups groups_attribute_path = groups
name_attribute_path = name name_attribute_path = name
# Role attribute path is not working
role_attribute_path = contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || contains(groups[*], 'developer') && 'Editor'
allow_assign_grafana_admin = true
skip_org_role_sync = true
use_pkce = true use_pkce = true
;team_ids = ;team_ids =

View File

@ -104,7 +104,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -458,7 +458,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_blacklist_cache) / sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_blacklist_cache) / sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -533,7 +533,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -753,7 +753,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_cache_entry_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_cache_entry_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -1162,7 +1162,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(time() -blocky_last_list_group_refresh)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(time() -blocky_last_list_group_refresh)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -1224,7 +1224,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"interval": "", "interval": "",
"legendFormat": "", "legendFormat": "",

View File

@ -0,0 +1,783 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.5.5"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "piechart",
"name": "Pie chart v2",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "singlestat",
"name": "Singlestat",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Traefik dashboard prometheus",
"editable": true,
"gnetId": 4475,
"graphTooltip": 0,
"id": null,
"iteration": 1620932097756,
"links": [],
"panels": [
{
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 10,
"title": "$backend stats",
"type": "row"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 1
},
"id": 2,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value",
"percent"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "traefik_service_requests_total{service=\"$service\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"title": "$service return code",
"type": "piechart"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"format": "ms",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 1
},
"id": 4,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"exemplar": true,
"expr": "sum(traefik_service_request_duration_seconds_sum{service=\"$service\"}) / sum(traefik_service_requests_total{service=\"$service\"}) * 1000",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "",
"title": "$service response time",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "avg"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 3,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total{service=\"$service\"}[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "Total requests $service",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Total requests over 5min $service",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 12,
"panels": [],
"title": "Global stats",
"type": "row"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 16
},
"hiddenSeries": false,
"id": 5,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Status code 200 over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 16
},
"hiddenSeries": false,
"id": 6,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ method }} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Others status code over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ service }}",
"refId": "A"
}
],
"title": "Requests by service",
"type": "piechart"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ entrypoint }}",
"refId": "A"
}
],
"title": "Requests by protocol",
"type": "piechart"
}
],
"schemaVersion": 27,
"style": "dark",
"tags": [
"traefik",
"prometheus"
],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "label_values(service)",
"description": null,
"error": null,
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(service)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "",
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "entrypoint",
"options": [],
"query": {
"query": "label_values(entrypoint)",
"refId": "Prometheus-entrypoint-Variable-Query"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Traefik",
"uid": "qPdAviJmz",
"version": 10
}

View File

@ -5,4 +5,4 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
options: options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default path: /etc/grafana/provisioning/dashboards/default

View File

@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox

View File

@ -1,96 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f /.lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}
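# Flow recap: the start script syncs any existing certs down from Nomad
# variables, picks `run` vs `renew` based on whether the wildcard cert is
# already present, and the matching --run-hook/--renew-hook pushes the
# refreshed .lego tree back into secrets/certs (see the ACL policies below).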

View File

@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}

View File

@ -3,27 +3,31 @@ auth_enabled: false
server: server:
http_listen_port: 3100 http_listen_port: 3100
common: ingester:
ring: lifecycler:
instance_addr: 127.0.0.1 address: 127.0.0.1
kvstore: ring:
store: inmemory kvstore:
replication_factor: 1 store: inmemory
path_prefix: /tmp/loki replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
max_transfer_retries: 0
schema_config: schema_config:
configs: configs:
- from: 2020-05-15 - from: 2018-04-15
store: boltdb-shipper store: boltdb
object_store: filesystem object_store: filesystem
schema: v11 schema: v11
index: index:
prefix: index_ prefix: index_
period: 24h period: 168h
storage_config: storage_config:
boltdb_shipper: boltdb:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem: filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks directory: {{ env "NOMAD_TASK_DIR" }}/chunks
@ -34,8 +38,8 @@ limits_config:
reject_old_samples_max_age: 168h reject_old_samples_max_age: 168h
chunk_store_config: chunk_store_config:
max_look_back_period: 168h max_look_back_period: 0s
table_manager: table_manager:
retention_deletes_enabled: true retention_deletes_enabled: false
retention_period: 168h retention_period: 0s

View File

@ -3,17 +3,15 @@ module "loki" {
detach = false detach = false
name = "loki" name = "loki"
image = "grafana/loki:2.8.7" image = "grafana/loki:2.2.1"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"] args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100 service_port = 3100
ingress = true ingress = true
use_wesher = var.use_wesher use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true sticky_disk = true
# healthcheck = "/ready"
templates = [ templates = [
{ {
data = file("${path.module}/loki-config.yml") data = file("${path.module}/loki-config.yml")

View File

@ -1,3 +1,17 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
resource "nomad_job" "grafana" { resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", { jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module module_path = path.module
@ -79,39 +93,3 @@ EOH
task = "stunnel" task = "stunnel"
} }
} }
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# resource "nomad_variable" "grafana_config" {
# for_each = fileset("${path.module}/grafana", "**")
#
# path = "nomad/jobs/grafana/${replace(each.key, ".", "_")}"
# items = {
# path = "${each.key}"
# value = file("${path.module}/grafana/${each.key}")
# left_delimiter = endswith(each.key, ".json") ? "<<<<" : "{{"
# right_delimiter = endswith(each.key, ".json") ? ">>>>" : "}}"
# }
# }

View File

@ -24,8 +24,7 @@ job "nomad-client-stalker" {
resources { resources {
cpu = 10 cpu = 10
memory = 15 memory = 10
memory_max = 30
} }
} }
} }

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}

View File

@ -1,50 +0,0 @@
resource "random_password" "oidc_client_id" {
length = 72
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}
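# Taken together: the module mints the client_id/secret, publishes them for
# both Authelia and the owning job, and scopes read access via job_acl. A
# minimal invocation sketch (service name and redirect URI hypothetical;
# mirrors the grafana_oidc block above):
module "example_oidc" {
  source = "./oidc_client"
  name   = "example"
  oidc_client_config = {
    description   = "Example service"
    redirect_uris = ["https://example.thefij.rocks/oauth/callback"]
    scopes        = ["openid", "email", "profile"]
  }
  job_acl = {
    job_id = "example"
  }
}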

View File

@ -1,11 +0,0 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}

View File

@ -1,25 +0,0 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}

View File

@ -37,36 +37,12 @@ job "prometheus" {
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure", "traefik.http.routers.prometheus.entryPoints=websecure",
] ]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
service { service {
name = "pushgateway" name = "pushgateway"
provider = "nomad" provider = "nomad"
port = "pushgateway" port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
task "prometheus" { task "prometheus" {

View File

@ -1,7 +0,0 @@
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}

View File

@ -26,8 +26,14 @@ job "syslogng" {
task "promtail" { task "promtail" {
driver = "docker" driver = "docker"
meta = {
"diun.sort_tags" = "semver"
"diun.watch_repo" = true
"diun.include_tags" = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
}
config { config {
image = "grafana/promtail:3.3.0" image = "grafana/promtail:2.9.1"
ports = ["main", "metrics"] ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"] args = ["--config.file=/etc/promtail/promtail.yml"]
@ -66,7 +72,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 20
} }
} }
} }
@ -130,7 +136,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 10
} }
} }
} }

View File

@ -2,20 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0" version = "1.4.17"
hashes = [ hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=", "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454", "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605", "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc", "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3", "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88", "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269", "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3", "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169", "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a", "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69", "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9", "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
] ]
} }

View File

@ -14,15 +14,13 @@ job "traefik" {
update { update {
max_parallel = 1 max_parallel = 1
canary = 1 # canary = 1
auto_promote = false # auto_promote = true
auto_revert = true auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
} }
group "traefik" { group "traefik" {
count = 2 count = 1
network { network {
port "web" { port "web" {
@ -37,17 +35,12 @@ job "traefik" {
static = 514 static = 514
} }
port "gitssh" {
static = 2222
}
port "metrics" {}
dns { dns {
servers = [ servers = [
"192.168.2.101", "192.168.2.101",
"192.168.2.102", "192.168.2.102",
"192.168.2.30", "192.168.2.30",
"192.168.2.170",
] ]
} }
} }
@ -57,42 +50,39 @@ job "traefik" {
sticky = true sticky = true
} }
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
port = "web"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
task "traefik" { task "traefik" {
driver = "docker" driver = "docker"
service { meta = {
name = "traefik" "diun.sort_tags" = "semver"
provider = "nomad" "diun.watch_repo" = true
port = "web" "diun.include_tags" = "^[0-9]+\\.[0-9]+$"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
} }
config { config {
image = "traefik:3.0" image = "traefik:2.9"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"] ports = ["web", "websecure"]
network_mode = "host" network_mode = "host"
mount { mount {
@ -106,20 +96,6 @@ job "traefik" {
target = "/etc/traefik/usersfile" target = "/etc/traefik/usersfile"
source = "secrets/usersfile" source = "secrets/usersfile"
} }
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
} }
template { template {
@ -142,9 +118,12 @@ job "traefik" {
[entryPoints.websecure] [entryPoints.websecure]
address = ":443" address = ":443"
[entryPoints.websecure.http.tls] [entryPoints.websecure.http.tls]
certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"
[entryPoints.metrics] [entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>" address = ":8989"
[entryPoints.syslogtcp] [entryPoints.syslogtcp]
address = ":514" address = ":514"
@ -152,9 +131,6 @@ job "traefik" {
[entryPoints.syslogudp] [entryPoints.syslogudp]
address = ":514/udp" address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api] [api]
dashboard = true dashboard = true
@ -174,9 +150,31 @@ job "traefik" {
exposedByDefault = false exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)" defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint] [providers.nomad.endpoint]
address = "unix:///secrets/api.sock" address = "http://<< env "attr.unique.network.ip-address" >>:4646"
<< if nomadVarExists "nomad/jobs/traefik" ->>
[certificatesResolvers.letsEncrypt.acme]
email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
# Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
delayBeforeCheck = 0
<<- end >>
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml" destination = "local/config/traefik.toml"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
} }
template { template {
@ -187,48 +185,23 @@ job "traefik" {
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "nomad" service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
[http.routers.hass]
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.routers.{{ .name }}]
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "{{ .name }}" service = "hass"
rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}" rule = "Host(`hass.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ $name := .name -}}
{{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }}
{{- end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services] [http.services]
[http.services.nomad] [http.services.nomad]
[http.services.nomad.loadBalancer] [http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]] [[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646" url = "http://127.0.0.1:4646"
[http.services.hass]
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }} [http.services.hass.loadBalancer]
[http.services.{{ .name }}] [[http.services.hass.loadBalancer.servers]]
[http.services.{{ .name }}.loadBalancer] url = "http://192.168.3.65:8123"
[[http.services.{{ .name }}.loadBalancer.servers]]
url = "{{ .url }}"
{{- end }}{{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml" destination = "local/config/conf/route-hashi.toml"
change_mode = "noop" change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
} }
template { template {
@ -264,39 +237,7 @@ job "traefik" {
{{ end -}} {{ end -}}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml" destination = "local/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop" change_mode = "noop"
} }
@ -306,11 +247,12 @@ EOF
{{ with nomadVar "nomad/jobs/traefik" }} {{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }} {{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth] [http.middlewares.basic-auth.basicAuth]
# TODO: Reference secrets mount
usersFile = "/etc/traefik/usersfile" usersFile = "/etc/traefik/usersfile"
{{- end }} {{- end }}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml" destination = "local/config/conf/middlewares.toml"
change_mode = "noop" change_mode = "noop"
} }
@ -320,7 +262,7 @@ EOF
{{ .usersfile }} {{ .usersfile }}
{{- end }} {{- end }}
EOH EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile" destination = "secrets/usersfile"
change_mode = "noop" change_mode = "noop"
} }

View File

@ -1,90 +1,3 @@
resource "nomad_job" "traefik" { resource "nomad_job" "traefik" {
jobspec = file("${path.module}/traefik.nomad") jobspec = file("${path.module}/traefik.nomad")
} }
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}
resource "nomad_variable" "traefik_external_jellyfin" {
path = "traefik_external/jellyfin"
items = {
name = "jellyfin"
subdomain = "jellyfin",
url = "http://agnosticfront.thefij:8096"
}
}
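# Each traefik_external/* variable above pairs with the nomadVarList loops in
# traefik.nomad, which stamp out one router and one service per entry. Adding
# an upstream is just one more variable (name and URL hypothetical):
resource "nomad_variable" "traefik_external_example" {
  path = "traefik_external/example"
  items = {
    name      = "example"
    subdomain = "example"
    url       = "http://192.168.2.50:8080"
  }
}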

View File

@ -3,10 +3,6 @@ job "lldap" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "lldap" { group "lldap" {
network { network {
@ -74,12 +70,10 @@ job "lldap" {
data = <<EOH data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}" ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}} {{ with nomadVar "nomad/jobs/lldap" -}}
ldap_user_dn = "{{ .admin_user }}" ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}" ldap_user_email = "{{ .admin_email }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options] [smtp_options]
from = "{{ .smtp_from }}" from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}" reply_to = "{{ .smtp_reply_to }}"
@ -115,7 +109,7 @@ user = "{{ .user }}"
} }
template { template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}" data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt" destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart" change_mode = "restart"
} }
@ -145,7 +139,7 @@ user = "{{ .user }}"
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
@ -199,9 +193,9 @@ SELECT 'NOOP';
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -209,6 +203,15 @@ SELECT 'NOOP';
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no
@ -219,7 +222,7 @@ delay = yes
accept = {{ env "NOMAD_PORT_tls" }} accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }} connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt PSKsecrets = {{ env "NOMAD_TASK_DIR" }}/stunnel_psk.txt
[mysql_client] [mysql_client]
client = yes client = yes
@ -238,7 +241,7 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ with nomadVar .Path }}{{ .psk }}{{ end }} {{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}} {{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt" destination = "$${NOMAD_TASK_DIR}/stunnel_psk.txt"
} }
template { template {

View File

@ -9,42 +9,6 @@ resource "nomad_job" "lldap" {
detach = false detach = false
} }
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
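# The self-scoped PSK above keeps the stunnel config renderable on first boot,
# before any client job has registered its own PSK. Consumers are collected
# with the same nomadVarList pattern used in lldap.nomad; a minimal sketch
# (stanza shape assumed, $$ escaping as in the templatefile'd jobspec):
template {
  data        = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
  destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}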
# Give access to smtp secrets # Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" { resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp" name = "lldap-secrets-smtp"
@ -60,7 +24,6 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap" job_id = "lldap"
group = "lldap" group = "lldap"
task = "lldap" task = "lldap"
@ -82,7 +45,6 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap" job_id = "lldap"
group = "lldap" group = "lldap"
task = "bootstrap" task = "bootstrap"
@ -115,9 +77,27 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap" job_id = "lldap"
group = "lldap" group = "lldap"
task = "stunnel" task = "stunnel"
} }
} }
# Give access to all ldap secrets
resource "nomad_acl_policy" "secrets_ldap" {
name = "secrets-ldap"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.lldap.id
}
}

62
databases/main.tf Normal file
View File

@ -0,0 +1,62 @@
resource "nomad_job" "mysql-server" {
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.mysql-server.id
}
}
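# With this policy keyed to the job via job_acl, any task in mysql-server can
# render variables under secrets/mysql/. A minimal consuming template
# (variable path taken from the PSK resource later in this diff; stanza shape
# assumed):
template {
  data        = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/mysql" }}{{ .psk }}{{ end -}}
EOF
  destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}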
resource "nomad_job" "postgres-server" {
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.postgres-server.id
}
}
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}

View File

@ -3,10 +3,6 @@ job "mysql-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "mysql-server" { group "mysql-server" {
count = 1 count = 1
@ -77,7 +73,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
resources { resources {
cpu = 300 cpu = 300
memory = 1600 memory = 1536
} }
} }
@ -85,9 +81,9 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "alpine:3.17"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -95,6 +91,15 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel ${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no

View File

@ -1,41 +0,0 @@
resource "nomad_job" "mysql-server" {
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
}

View File

@ -3,10 +3,6 @@ job "postgres-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "postgres-server" { group "postgres-server" {
count = 1 count = 1
@ -77,8 +73,7 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
resources { resources {
cpu = 500 cpu = 500
memory = 800 memory = 400
memory_max = 1500
} }
} }
@ -86,9 +81,9 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "alpine:3.17"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -96,6 +91,15 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
memory = 100 memory = 100
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel ${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no

View File

@ -1,41 +0,0 @@
resource "nomad_job" "postgres-server" {
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.postgres-server.id
job_id = "postgres-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "postgres_postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_postgres_psk" {
path = "secrets/postgres/allowed_psks/postgres"
items = {
psk = "postgres:${resource.random_password.postgres_postgres_psk.result}"
}
}

View File

@ -3,10 +3,6 @@ job "redis-${name}" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "cache" { group "cache" {
count = 1 count = 1
@ -39,7 +35,7 @@ job "redis-${name}" {
resources { resources {
cpu = 100 cpu = 100
memory = 64 memory = 128
memory_max = 512 memory_max = 512
} }
} }
@ -48,14 +44,23 @@ job "redis-${name}" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
cpu = 50 cpu = 100
memory = 15 memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel $${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
} }
template { template {

View File

@ -1,12 +0,0 @@
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}

View File

@ -1,5 +1,5 @@
pre-commit pre-commit
detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml
ansible ansible
python-nomad python-consul
netaddr hvac

View File

@ -56,10 +56,6 @@ for job in nomad_req("jobs"):
if job["Type"] in ("batch", "sysbatch"): if job["Type"] in ("batch", "sysbatch"):
continue continue
if job["Status"] != "running":
print(f"WARNING: job {job['Name']} is {job['Status']}")
continue
job_detail = nomad_req("job", job["ID"]) job_detail = nomad_req("job", job["ID"])
job_detail = cast(dict[str, Any], job_detail) job_detail = cast(dict[str, Any], job_detail)
@ -87,7 +83,7 @@ for job in nomad_req("jobs"):
restart_allocs: set[str] = set() restart_allocs: set[str] = set()
for allocation in nomad_req("job", job_detail["ID"], "allocations"): for allocation in nomad_req("job", job_detail["ID"], "allocations"):
allocation = cast(dict[str, Any], allocation) allocation = cast(dict[str, Any], allocation)
if allocation["ClientStatus"] == "running" and allocation["TaskGroup"] in restart_groups: if allocation["TaskGroup"] in restart_groups:
restart_allocs.add(allocation["ID"]) restart_allocs.add(allocation["ID"])
# Restart allocs associated with missing services # Restart allocs associated with missing services

View File

@ -57,11 +57,9 @@ for namespace in nomad_req("services"):
except requests.exceptions.HTTPError as e: except requests.exceptions.HTTPError as e:
if e.response.status_code == 404: if e.response.status_code == 404:
alloc_found = False alloc_found = False
message = f"alloc {alloc_id} not found for {service_name}." print(
if args.delete: f"alloc {alloc_id} not found for {service_name}. Deleting {service_id}"
message += f" Deleting {service_id}" )
print(message)
else: else:
raise e raise e

View File

@ -2,39 +2,39 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1" version = "2.0.0"
hashes = [ hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=", "h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb", "zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1", "zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9", "zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39", "zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082", "zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196", "zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f", "zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7", "zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
] ]
} }
provider "registry.terraform.io/hashicorp/random" { provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0" version = "3.5.1"
hashes = [ hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
] ]
} }

58
services/bazarr.tf Normal file
View File

@ -0,0 +1,58 @@
module "bazarr" {
source = "./service"
name = "bazarr"
image = "lscr.io/linuxserver/bazarr:1.2.4"
resources = {
cpu = 150
memory = 400
}
ingress = true
service_port = 6767
use_wesher = var.use_wesher
use_postgres = true
postgres_bootstrap = {
enabled = true
}
env = {
PGID = 100
PUID = 1001
TZ = "America/Los_Angeles"
}
host_volumes = [
{
name = "bazarr-config"
dest = "/config"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
]
templates = [
{
data = <<EOF
{{ with nomadVar "nomad/jobs/bazarr" -}}
POSTGRES_ENABLED=True
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_DATABASE={{ .db_name }}
POSTGRES_USERNAME={{ .db_user }}
POSTGRES_PASSWORD={{ .db_pass }}
{{- end }}
EOF
dest_prefix = "$${NOMAD_SECRETS_DIR}/"
dest = "env"
env = true
mount = false
},
]
}
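Note on the templates block above: Nomad renders it at task start and, because env = true, exports each line of the rendered file into the task environment. Assuming the Nomad variable nomad/jobs/bazarr carries db_name, db_user, and db_pass (the values below are hypothetical), the file written to $NOMAD_SECRETS_DIR/env would look roughly like:

    POSTGRES_ENABLED=True
    POSTGRES_HOST=127.0.0.1
    POSTGRES_PORT=5432
    POSTGRES_DATABASE=bazarr
    POSTGRES_USERNAME=bazarr
    POSTGRES_PASSWORD=hunter2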


@ -1,44 +0,0 @@
module "wishlist" {
source = "./service"
name = "wishlist"
image = "wingysam/christmas-community:latest"
ingress = true
service_port = 80
use_wesher = var.use_wesher
host_volumes = [
{
name = "christmas-community"
dest = "/data"
read_only = false
},
]
templates = [
{
data = <<EOF
{{ with nomadVar "nomad/jobs/wishlist" -}}
GUEST_PASSWORD={{ .guest_password }}
{{ end -}}
{{ with nomadService "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
TRUST_PROXY={{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
EOF
dest = "env"
dest_prefix = "$${NOMAD_SECRETS_DIR}/"
env = true
},
]
resources = {
cpu = 100
memory = 200
}
}


@ -2,7 +2,7 @@ module "diun" {
source = "./service" source = "./service"
name = "diun" name = "diun"
image = "crazymax/diun:4.28" image = "crazymax/diun:4.26"
args = ["serve", "--log-level=debug"] args = ["serve", "--log-level=debug"]
sticky_disk = true sticky_disk = true
@ -13,16 +13,13 @@ module "diun" {
DIUN_PROVIDERS_NOMAD_WATCHBYDEFAULT = true DIUN_PROVIDERS_NOMAD_WATCHBYDEFAULT = true
DIUN_DEFAULTS_WATCHREPO = true DIUN_DEFAULTS_WATCHREPO = true
DIUN_DEFAULTS_SORTTAGS = "semver" DIUN_DEFAULTS_SORTTAGS = "semver"
DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$" DIUN_DEFAUTLS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$"
# Nomad API # Nomad API
NOMAD_ADDR = "unix:///secrets/api.sock" # TODO: Use socket in $NOMAD_SECRETS_DIR/api.sock when we can assign workload ACLs with Terraform to
DIUN_PROVIDERS_NOMAD = true # allow read access. Will need to update template to allow passing token by env
DIUN_PROVIDERS_NOMAD_SECRETID = "$${NOMAD_TOKEN}" NOMAD_ADDR = "http://$${attr.unique.network.ip-address}:4646/"
} DIUN_PROVIDERS_NOMAD = true
task_identity = {
env = true
} }
templates = [ templates = [
@ -38,17 +35,18 @@ module "diun" {
mount = false mount = false
}, },
] ]
}
resource "nomad_acl_policy" "diun_query_jobs" { workload_acl_policy = {
name = "diun-query-jobs" name = "diun-read"
description = "Allow diun to query jobs" description = "Give the diun task read access to jobs"
rules_hcl = <<EOH
rules_hcl = <<EOH
namespace "default" { namespace "default" {
capabilities = ["list-jobs", "read-job"] capabilities = [
"list-jobs",
"read-job",
]
} }
EOH EOH
job_acl {
job_id = module.diun.job_id
} }
} }
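For reference, the left column replaces the module-level workload_acl_policy with the native pattern: task workload identity plus a job-scoped ACL policy. A minimal sketch of how the two pieces fit together (names below are hypothetical):

    # In the task: request the workload identity as an env var (NOMAD_TOKEN)
    identity {
      env = true
    }

    # In Terraform: grant that identity read access, bound to the job
    resource "nomad_acl_policy" "example_query_jobs" {
      name      = "example-query-jobs"
      rules_hcl = <<EOH
    namespace "default" {
      capabilities = ["list-jobs", "read-job"]
    }
    EOH

      job_acl {
        job_id = "example"
      }
    }

Because NOMAD_ADDR points at the task API socket (unix:///secrets/api.sock), diun talks to its local agent and authenticates with the injected token.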


@ -1,168 +0,0 @@
module "gitea" {
source = "./service"
name = "git"
image = "gitea/gitea:1.21"
resources = {
cpu = 200
memory = 512
}
env = {
# Custom files should be part of the task
GITEA_WORK_DIR = "$${NOMAD_TASK_DIR}"
GITEA_CUSTOM = "$${NOMAD_TASK_DIR}/custom"
}
ingress = true
service_port = 3000
use_wesher = var.use_wesher
ports = [
{
name = "ssh"
to = 22
}
]
service_check = {
path = "/api/healthz"
}
custom_services = [
{
name = "git-ssh"
port = "ssh"
tags = [
"traefik.enable=true",
"traefik.tcp.routers.git-ssh.entryPoints=gitssh",
"traefik.tcp.routers.git-ssh.rule=HostSNI(`*`)",
"traefik.tcp.routers.git-ssh.tls=false",
]
},
]
use_smtp = true
mysql_bootstrap = {
enabled = true
}
oidc_client_config = {
description = "Gitea"
redirect_uris = [
"https://git.thefij.rocks/user/oauth2/authelia/callback",
]
scopes = ["openid", "email", "profile"]
}
host_volumes = [
{
name = "gitea-data"
dest = "/data"
read_only = false
},
]
# TODO: Bootstrap OIDC with
# su -- git gitea admin auth add-oauth --name authelia --provider openidConnect --key gitea --secret "{{ .oidc_secret }}" --auto-discover-url https://authelia.thefij.rocks/.well-known/openid-configuration --skip-local-2fa
templates = [
{
data = <<EOF
{{ with nomadVar "nomad/jobs/git" }}
GITEA__server__DOMAIN=git.thefij.rocks
GITEA__server__SSH_PORT=2222
GITEA__server__ROOT_URL=https://git.thefij.rocks
GITEA__security__INSTALL_LOCK=true
GITEA__database__DB_TYPE=mysql
GITEA__database__HOST=127.0.0.1:3306
GITEA__database__NAME={{ .db_name }}
GITEA__database__USER={{ .db_user }}
GITEA__service__DISABLE_REGISTRATION=false
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
GITEA__service__SHOW_REGISTRATION_BUTTON=false
GITEA__openid__ENABLE_OPENID_SIGNIN=true
GITEA__openid__ENABLE_OPENID_SIGNUP=true
GITEA__openid__WHITELISTED_URIS=authelia.thefij.rocks
GITEA__log__ROOT_PATH={{ env "NOMAD_TASK_DIR" }}/log
GITEA__mailer__ENABLED=true
GITEA__mailer__FROM={{ .smtp_sender }}
GITEA__session__provider=db
{{ end }}
EOF
env = true
mount = false
dest = "env"
},
# TODO: Gitea writes these out to the ini file in /local anyway
# Find some way to get it to write to /secrets
{
data = <<EOF
{{ with nomadVar "nomad/jobs/git" }}
GITEA__security__SECRET_KEY="{{ .secret_key }}"
GITEA__database__PASSWD={{ .db_pass }}
{{ end }}
{{ with nomadVar "secrets/smtp" }}
GITEA__mailer__SMTP_ADDR={{ .server }}
GITEA__mailer__SMTP_PORT={{ .port }}
GITEA__mailer__USER={{ .user }}
GITEA__mailer__PASSWD={{ .password }}
{{ end }}
EOF
env = true
mount = false
dest = "env"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
},
{
data = <<EOF
{{ with nomadVar "secrets/authelia/git" -}}
CLIENT_ID={{ .client_id }}
SECRET={{ .secret }}
{{- end }}
EOF
dest = "oauth.env"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
mount = false
change_mode = "script"
change_script = {
command = "/local/bootstrap_auth.sh"
}
},
{
data = <<EOF
#! /bin/bash
source {{ env "NOMAD_SECRETS_DIR" }}/oauth.env
auth_provider_id=$(su -- git gitea admin auth list | awk '/authelia/ { print $1 }')
if [ -z "$auth_provider_id" ]; then
echo "Creating Authelia OAuth provider"
su -- git gitea admin auth add-oauth \
--name authelia \
--provider openidConnect \
--key "$CLIENT_ID" \
--secret "$SECRET" \
--auto-discover-url https://authelia.thefij.rocks/.well-known/openid-configuration \
--skip-local-2fa
else
echo "Updating Authelia OAuth provider"
su -- git gitea admin auth update-oauth \
--id $auth_provider_id \
--key "$CLIENT_ID" \
--secret "$SECRET"
fi
EOF
dest = "bootstrap_auth.sh"
perms = "777"
change_mode = "noop"
mount = false
},
]
}
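The oauth.env template above uses change_mode = "script": whenever the rendered credentials change, Nomad re-runs the bootstrap script instead of restarting the task. With the defaults from the module's templates variable filled in, the rendered stanza would look roughly like:

    template {
      destination = "${NOMAD_SECRETS_DIR}/oauth.env"
      change_mode = "script"

      change_script {
        command       = "/local/bootstrap_auth.sh"
        timeout       = "5s"
        fail_on_error = false
      }
    }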

services/ip-dvr.nomad Normal file

@ -0,0 +1,212 @@
job "ipdvr" {
region = "global"
datacenters = ["dc1"]
type = "service"
group "sabnzbd" {
network {
mode = "bridge"
port "main" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
volume "sabnzbd-config" {
type = "host"
read_only = false
source = "sabnzbd-config"
}
volume "media-downloads" {
type = "host"
read_only = false
source = "media-downloads"
}
service {
name = "sabnzbd"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.sabnzbd.entryPoints=websecure",
]
}
task "sabnzbd" {
driver = "docker"
config {
image = "linuxserver/sabnzbd"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sabnzbd-config"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-downloads"
destination = "/downloads"
read_only = false
}
resources {
cpu = 400
memory = 500
memory_max = 800
}
}
}
group "nzbget" {
network {
mode = "bridge"
port "main" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 6789
}
}
volume "nzbget-config" {
type = "host"
read_only = false
source = "nzbget-config"
}
volume "media-downloads" {
type = "host"
read_only = false
source = "media-downloads"
}
service {
name = "nzbget"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.nzbget.entryPoints=websecure",
]
}
task "nzbget" {
driver = "docker"
config {
image = "linuxserver/nzbget"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "nzbget-config"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-downloads"
destination = "/downloads"
read_only = false
}
resources {
cpu = 200
memory = 300
memory_max = 500
}
}
}
group "sonarr" {
network {
mode = "bridge"
port "main" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8989
}
}
volume "sonarr-data" {
type = "host"
read_only = false
source = "sonarr-data"
}
volume "media-write" {
type = "host"
read_only = false
source = "media-write"
}
service {
name = "sonarr"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.sonarr.entryPoints=websecure",
]
}
task "sonarr" {
driver = "docker"
config {
image = "lscr.io/linuxserver/sonarr:3.0.10"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sonarr-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "media-write"
destination = "/media"
read_only = false
}
resources {
cpu = 100
memory = 500
memory_max = 700
}
}
}
}


@ -1,25 +0,0 @@
module "languagetool" {
source = "./service"
name = "languagetool"
image = "ghcr.io/erikvl87/docker-languagetool/languagetool:4.8"
ingress = true
service_port = 8010
use_wesher = var.use_wesher
env = {
Java_Xmx = "512m"
}
service_check = {
path = "/v2/healthcheck"
}
# Possibility to use a volume over nfs to host n-gram datasets
# https://github.com/Erikvl87/docker-languagetool/pkgs/container/docker-languagetool%2Flanguagetool#using-n-gram-datasets
resources = {
cpu = 100
memory = 512
}
}


@ -40,4 +40,9 @@ module "lidarr" {
cpu = 500 cpu = 500
memory = 1500 memory = 1500
} }
stunnel_resources = {
cpu = 100
memory = 100
}
} }


@ -0,0 +1,5 @@
resource "nomad_job" "ipdvr" {
jobspec = templatefile("${path.module}/ip-dvr.nomad", {
use_wesher = var.use_wesher,
})
}
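Since ip-dvr.nomad is run through templatefile, the %{ if use_wesher } directives are resolved by Terraform before Nomad ever sees the jobspec (the same reason service_template.nomad escapes runtime interpolations as $${...}). With use_wesher = true, the sabnzbd port block renders to:

    port "main" {
      host_network = "wesher"
      to           = 8080
    }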


@ -11,11 +11,25 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://my.iamthefij.com' - 'https://my.iamthefij.com'
- name: Grafana (public)
command:
- '/app/scripts/curl_ok.sh'
- 'https://grafana.iamthefij.com'
- name: Grafana (internal) - name: Grafana (internal)
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://grafana.thefij.rocks' - 'https://grafana.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
- name: NZBget - name: NZBget
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -31,11 +45,6 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://lidarr.thefij.rocks' - 'https://lidarr.thefij.rocks'
- name: Radarr
command:
- '/app/scripts/curl_ok.sh'
- 'https://radarr.thefij.rocks'
- name: Authelia - name: Authelia
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -46,20 +55,6 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://photoprism.thefij.rocks' - 'https://photoprism.thefij.rocks'
- name: Prometheus
command:
- '/app/scripts/curl_ok.sh'
- 'https://prometheus.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
alerts: alerts:
log: log:
command: command:


@ -1,13 +1,12 @@
module "minitor" { module "minitor" {
source = "./service" source = "./service"
name = "minitor" name = "minitor"
image = "iamthefij/minitor-go:1.4.1" image = "iamthefij/minitor-go:1.4.1"
args = ["-metrics", "-config=$${NOMAD_TASK_DIR}/config.yml"] args = ["-metrics", "-config=$${NOMAD_TASK_DIR}/config.yml"]
service_port = 8080 service_port = 8080
service_check = null use_wesher = var.use_wesher
use_wesher = var.use_wesher prometheus = true
prometheus = true
env = { env = {
TZ = "America/Los_Angeles", TZ = "America/Los_Angeles",


@ -1,63 +0,0 @@
job "fixers" {
type = "batch"
periodic {
cron = "*/15 * * * * *"
prohibit_overlap = true
}
meta = {
"diun.enable" = false
}
group "orphaned_services" {
task "orphaned_services" {
driver = "docker"
config {
image = "iamthefij/nomad-service-fixers:0.1.1"
command = "/scripts/nomad_orphan_services.py"
args = ["--delete"]
}
env = {
NOMAD_ADDR = "http+unix://%2Fsecrets%2Fapi.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
group "missing_services" {
task "missing_services" {
driver = "docker"
config {
image = "iamthefij/nomad-service-fixers:0.1.1"
command = "/scripts/nomad_missing_services.py"
args = ["--restart"]
}
env = {
NOMAD_ADDR = "http+unix://%2Fsecrets%2Fapi.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}


@ -1,22 +0,0 @@
resource "nomad_job" "nomad-fixers" {
jobspec = file("${path.module}/nomad-fixers.nomad")
}
resource "nomad_acl_policy" "nomad_fixers_workload" {
name = "nomad-fixers-workload"
description = "Give nomad fixers access to the Nomad api for fixing things"
rules_hcl = <<EOH
namespace "default" {
capabilities = [
"list-jobs",
"read-job",
"submit-job", # This allows deleting a service registeration
"alloc-lifecycle",
]
}
EOH
job_acl {
job_id = "fixers/*"
}
}
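Two details worth noting here: the task API address is a URL-encoded unix socket path (http+unix://%2Fsecrets%2Fapi.sock decodes to /secrets/api.sock), and job_acl uses job_id = "fixers/*" because periodic jobs dispatch child instances with IDs like fixers/periodic-1698765432, which the wildcard must cover. A sketch of the pairing inside each task (the epoch in the example ID is hypothetical):

    env = {
      # /secrets/api.sock, URL-encoded into the authority component
      NOMAD_ADDR = "http+unix://%2Fsecrets%2Fapi.sock"
    }

    identity {
      env = true # injects NOMAD_TOKEN for the policy bound to fixers/*
    }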


@ -1,35 +0,0 @@
module "nzbget" {
source = "./service"
name = "nzbget"
image = "linuxserver/nzbget:v21.1-ls138"
ingress = true
service_port = 6789
use_wesher = var.use_wesher
env = {
PGID = 100
PUID = 1001
TZ = "America/Los_Angeles"
}
host_volumes = [
{
name = "nzbget-config"
dest = "/config"
read_only = false
},
{
name = "media-downloads"
dest = "/downloads"
read_only = false
},
]
resources = {
cpu = 200
memory = 300
memory_max = 500
}
}


@ -1,25 +1,28 @@
module "photoprism" { module "photoprism_module" {
source = "./service" source = "./service"
name = "photoprism" name = "photoprism"
image = "photoprism/photoprism:240711" image = "photoprism/photoprism:221118-jammy"
image_pull_timeout = "10m" image_pull_timeout = "10m"
# constraints = [{ constraints = [{
# attribute = "$${meta.hw_transcode.type}" attribute = "$${meta.hw_transcode.type}"
# # operator = "is_set" # operator = "is_set"
# value = "raspberry" value = "raspberry"
# }] }]
priority = 60
# docker_devices = [{ docker_devices = [{
# host_path = "$${meta.hw_transcode.device}" host_path = "$${meta.hw_transcode.device}"
# container_path = "$${meta.hw_transcode.device}" container_path = "$${meta.hw_transcode.device}"
# }] }]
resources = { resources = {
cpu = 1500 cpu = 2000
memory = 2200 memory = 2500
memory_max = 4000 memory_max = 4000
} }
stunnel_resources = {
cpu = 100
memory = 100
}
sticky_disk = true sticky_disk = true
host_volumes = [ host_volumes = [
{ {
@ -37,21 +40,18 @@ module "photoprism" {
ingress = true ingress = true
service_port = 2342 service_port = 2342
use_wesher = var.use_wesher use_wesher = var.use_wesher
ingress_middlewares = [
"authelia@nomad"
]
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
oidc_client_config = {
description = "Photoprism"
redirect_uris = [
"https://photoprism.thefij.rocks/api/v1/oidc/redirect",
]
scopes = ["openid", "email", "profile"]
}
env = { env = {
PHOTOPRISM_DEBUG = true PHOTOPRISM_DEBUG = true
# Make public since we added Authelia at the proxy level
PHOTOPRISM_AUTH_MODE = "public"
# UI # UI
PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App" PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App"
PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos" PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos"
@ -59,8 +59,7 @@ module "photoprism" {
PHOTOPRISM_SITE_URL = "https://photoprism.${var.base_hostname}/" PHOTOPRISM_SITE_URL = "https://photoprism.${var.base_hostname}/"
PHOTOPRISM_SPONSOR = "true" PHOTOPRISM_SPONSOR = "true"
# Worker config # Worker config
PHOTOPRISM_WORKERS = 2 PHOTOPRISM_WORKERS = 2
PHOTOPRISM_BACKUP_DATABASE = false
# Paths # Paths
PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library" PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library"
PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import" PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import"
@ -69,12 +68,6 @@ module "photoprism" {
PHOTOPRISM_UID = 500 PHOTOPRISM_UID = 500
PHOTOPRISM_GID = 100 PHOTOPRISM_GID = 100
PHOTOPRISM_UMASK = 0000 PHOTOPRISM_UMASK = 0000
# OIDC
PHOTOPRISM_OIDC_URI = "https://authelia.thefij.rocks"
PHOTOPRISM_OIDC_PROVIDER = "Authelia"
PHOTOPRISM_OIDC_REGISTER = true
PHOTOPRISM_OIDC_REDIRECT = true
PHOTOPRISM_OIDC_SCOPES = "openid email profile"
} }
templates = [ templates = [
@ -96,10 +89,6 @@ module "photoprism" {
PHOTOPRISM_FFMPEG_ENCODER=intel PHOTOPRISM_FFMPEG_ENCODER=intel
PHOTOPRISM_INIT="intel tensorflow" PHOTOPRISM_INIT="intel tensorflow"
{{- end }} {{- end }}
{{ with nomadVar "secrets/authelia/photoprism" -}}
PHOTOPRISM_OIDC_CLIENT={{ .client_id }}
PHOTOPRISM_OIDC_SECRET={{ .secret }}
{{- end }}
EOF EOF
dest_prefix = "$${NOMAD_SECRETS_DIR}/" dest_prefix = "$${NOMAD_SECRETS_DIR}/"
dest = "env" dest = "env"
@ -107,13 +96,4 @@ module "photoprism" {
mount = false mount = false
}, },
] ]
actions = [
{
name = "import"
command = "photoprism"
args = ["import", "/photoprism-media/Import"]
cron = "0 0 3 * * * *"
},
]
} }
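The left column wires Photoprism to Authelia through the service module's oidc_client_config, which is passed to the core/oidc_client module invoked in services/service/main.tf. A sketch of that invocation, assuming (not confirmed by this diff) that the module registers the client with Authelia and publishes the generated credentials under secrets/authelia/<name> for the template above to read:

    module "oidc_client" {
      source = "../../core/oidc_client"
      name   = var.name

      oidc_client_config = {
        description   = var.oidc_client_config.description
        redirect_uris = var.oidc_client_config.redirect_uris
        scopes        = var.oidc_client_config.scopes
      }

      # Presumably scopes reads of the generated secret to this job's task
      job_acl = {
        job_id = resource.nomad_job.service.id
        group  = var.name
        task   = var.name
      }
    }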


@ -1,59 +0,0 @@
module "radarr" {
source = "./service"
name = "radarr"
image = "lscr.io/linuxserver/radarr:5.2.6"
ingress = true
service_port = 7878
use_wesher = var.use_wesher
ingress_middlewares = [
"authelia@nomad"
]
use_postgres = true
postgres_bootstrap = {
enabled = true
databases = [
"radarr",
"radarr-logs",
]
}
env = {
PGID = 100
PUID = 1001
TZ = "America/Los_Angeles"
}
host_volumes = [
{
name = "radarr-config"
dest = "/config"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
]
resources = {
cpu = 500
memory = 500
memory_max = 700
}
}
resource "nomad_variable" "authelia_service_rules_radarr" {
path = "authelia/access_control/service_rules/radarr"
items = {
name = "radarr"
rule = <<EOH
policy: bypass
resources:
- '^/api([/?].*)?$'
EOH
}
}


@ -1,10 +1,3 @@
locals {
port_names = concat(
var.service_port != null ? ["main"] : [],
[for port in var.ports : port.name if port.task_config],
)
}
resource "nomad_job" "service" { resource "nomad_job" "service" {
jobspec = templatefile("${path.module}/service_template.nomad", { jobspec = templatefile("${path.module}/service_template.nomad", {
name = var.name name = var.name
@ -15,19 +8,14 @@ resource "nomad_job" "service" {
args = var.args args = var.args
env = var.env env = var.env
task_meta = var.task_meta task_meta = var.task_meta
task_identity = var.task_identity
group_meta = var.group_meta group_meta = var.group_meta
job_meta = var.job_meta job_meta = var.job_meta
constraints = var.constraints constraints = var.constraints
docker_devices = var.docker_devices docker_devices = var.docker_devices
user = var.user
actions = var.actions
service_port = var.service_port service_port = var.service_port
service_port_static = var.service_port_static service_port_static = var.service_port_static
service_check = var.service_check
ports = var.ports ports = var.ports
port_names = local.port_names
sticky_disk = var.sticky_disk sticky_disk = var.sticky_disk
resources = var.resources resources = var.resources
stunnel_resources = var.stunnel_resources stunnel_resources = var.stunnel_resources
@ -50,11 +38,27 @@ resource "nomad_job" "service" {
mysql_bootstrap = var.mysql_bootstrap mysql_bootstrap = var.mysql_bootstrap
postgres_bootstrap = var.postgres_bootstrap postgres_bootstrap = var.postgres_bootstrap
workload_identity_env = var.workload_acl_policy != null
}) })
detach = var.detach detach = var.detach
} }
resource "nomad_acl_policy" "workload_special" {
count = var.workload_acl_policy != null ? 1 : 0
name = var.workload_acl_policy.name
description = var.workload_acl_policy.description
rules_hcl = var.workload_acl_policy.rules_hcl
job_acl {
job_id = var.name
group = var.name
task = var.name
}
}
resource "nomad_acl_policy" "secrets_mysql" { resource "nomad_acl_policy" "secrets_mysql" {
count = var.use_mysql || var.mysql_bootstrap != null ? 1 : 0 count = var.use_mysql || var.mysql_bootstrap != null ? 1 : 0
@ -71,7 +75,7 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = "mysql-bootstrap" task = "mysql-bootstrap"
} }
@ -109,7 +113,7 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = "stunnel" task = "stunnel"
} }
@ -131,7 +135,7 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = "postgres-bootstrap" task = "postgres-bootstrap"
} }
@ -169,7 +173,7 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = "stunnel" task = "stunnel"
} }
@ -207,7 +211,7 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = "stunnel" task = "stunnel"
} }
@ -229,58 +233,8 @@ namespace "default" {
EOH EOH
job_acl { job_acl {
job_id = resource.nomad_job.service.id job_id = var.name
group = var.name group = var.name
task = var.name task = var.name
} }
} }
module "oidc_client" {
count = var.oidc_client_config != null ? 1 : 0
source = "../../core/oidc_client"
name = var.name
oidc_client_config = {
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = var.oidc_client_config.redirect_uris
scopes = var.oidc_client_config.scopes
}
job_acl = {
job_id = resource.nomad_job.service.id
group = var.name
task = var.name
}
}
# Action cron jobs
resource "nomad_job" "action_cron" {
for_each = tomap({ for action in var.actions : action.name => action if action.cron != null })
jobspec = templatefile("${path.module}/service_scheduled.nomad", {
name = var.name
action_name = each.value.name
action_cron = each.value.cron
})
}
resource "nomad_acl_policy" "action_cron_workload_policy" {
for_each = resource.nomad_job.action_cron
name = "service-action-${each.value.id}"
description = "Give custom service cron actions access to execute actions."
rules_hcl = <<EOH
namespace "default" {
capabilities = [
"list-jobs",
"read-job",
"alloc-exec",
]
}
EOH
job_acl {
job_id = each.value.id
}
}


@ -1,3 +0,0 @@
output "job_id" {
value = resource.nomad_job.service.id
}


@ -1,39 +0,0 @@
job "${name}-${action_name}" {
region = "global"
datacenters = ["dc1"]
type = "batch"
periodic {
cron = "${action_cron}"
}
group "main" {
task "${action_name}" {
driver = "docker"
config {
image = "hashicorp/nomad:$${attr.nomad.version}"
args = [
"job",
"action",
"-job",
"${name}",
"-group",
"${name}",
"-task",
"${name}",
"${action_name}"
]
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
}
}
}
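Connecting this template back to the actions variable: an action that sets a cron field (for example photoprism's import action on the left side of services/photoprism.tf) is rendered by the action_cron resource in service/main.tf into a batch job named photoprism-import, whose task effectively runs:

    nomad job action -job photoprism -group photoprism -task photoprism import

against the local agent socket, authenticated via the workload identity env token.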


@ -5,10 +5,6 @@ job "${name}" {
type = "service" type = "service"
priority = ${priority} priority = ${priority}
update {
auto_revert = true
}
group "${name}" { group "${name}" {
count = ${count} count = ${count}
%{~ if length(job_meta) > 0 } %{~ if length(job_meta) > 0 }
@ -35,18 +31,10 @@ job "${name}" {
%{~ endif ~} %{~ endif ~}
%{~ for port in ports } %{~ for port in ports }
port "${port.name}" { port "${port.name}" {
%{~ if port.host_network != null ~} %{ if port.host_network != null }host_network = "${port.host_network}"%{ endif ~}
host_network = "${port.host_network}" %{ if port.from != null }to = ${port.from}%{ endif ~}
%{~ endif ~} %{ if port.to != null }to = ${port.to}%{ endif ~}
%{~ if port.from != null ~} %{ if port.static != null }static = ${port.static}%{ endif ~}
from = ${port.from}
%{~ endif ~}
%{~ if port.to != null ~}
to = ${port.to}
%{~ endif ~}
%{~ if port.static != null ~}
static = ${port.static}
%{~ endif ~}
} }
%{~ endfor ~} %{~ endfor ~}
} }
@ -77,12 +65,45 @@ job "${name}" {
source = "${host_volume.name}" source = "${host_volume.name}"
} }
%{~ endfor ~} %{~ endfor ~}
%{~ if service_port != null }
service {
name = "${replace(name, "_", "-")}"
provider = "nomad"
port = "main"
tags = [
%{~ if prometheus == true ~}
"prometheus.scrape",
%{~ endif ~}
%{~ if ingress ~}
"traefik.enable=true",
"traefik.http.routers.${name}.entryPoints=websecure",
%{~ if try(ingress_rule, null) != null ~}
"traefik.http.routers.${name}.rule=${ingress_rule}",
%{~ endif ~}
%{~ for middleware in ingress_middlewares ~}
"traefik.http.routers.${name}.middlewares=${middleware}",
%{~ endfor ~}
%{~ endif ~}
%{~ for tag in service_tags ~}
"${tag}",
%{~ endfor ~}
]
}
%{~ endif ~}
%{~ for custom_service in custom_services ~}
service {
name = "${custom_service.name}"
provider = "nomad"
port = "${custom_service.port}"
tags = ${jsonencode(custom_service.tags)}
}
%{~ endfor ~}
task "${name}" { task "${name}" {
driver = "docker" driver = "docker"
%{~ if user != null }
user = "${user}"
%{~ endif ~}
%{~ if length(task_meta) > 0 } %{~ if length(task_meta) > 0 }
meta = { meta = {
%{ for k, v in task_meta ~} %{ for k, v in task_meta ~}
@ -90,70 +111,14 @@ job "${name}" {
%{ endfor ~} %{ endfor ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ if service_port != null }
service {
name = "${replace(name, "_", "-")}"
provider = "nomad"
port = "main"
tags = [
%{~ if prometheus == true ~}
"prometheus.scrape",
%{~ endif ~}
%{~ if ingress ~}
"traefik.enable=true",
"traefik.http.routers.${name}.entryPoints=websecure",
%{~ if try(ingress_rule, null) != null ~}
"traefik.http.routers.${name}.rule=${ingress_rule}",
%{~ endif ~}
%{~ for middleware in ingress_middlewares ~}
"traefik.http.routers.${name}.middlewares=${middleware}",
%{~ endfor ~}
%{~ endif ~}
%{~ for tag in service_tags ~}
"${tag}",
%{~ endfor ~}
]
%{~ if service_check != null ~}
check {
%{~ if service_check.name != "" ~}
name = "${service_check.name}"
%{~ endif ~}
%{~ if service_check.name != "" ~}
port = "${service_check.port}"
%{~ endif ~}
type = "${service_check.type}"
path = "${service_check.path}"
interval = "${service_check.interval}"
timeout = "${service_check.timeout}"
check_restart {
limit = 5
grace = "90s"
}
}
%{~ endif ~}
}
%{~ endif ~}
%{~ for custom_service in custom_services ~}
service {
name = "${custom_service.name}"
provider = "nomad"
port = "${custom_service.port}"
tags = ${jsonencode(custom_service.tags)}
}
%{~ endfor ~}
config { config {
image = "${image}" image = "${image}"
%{~if image_pull_timeout != null ~} %{~if image_pull_timeout != null ~}
image_pull_timeout = "${image_pull_timeout}" image_pull_timeout = "${image_pull_timeout}"
%{~ endif ~} %{~ endif ~}
%{~ if length(try(port_names, [])) > 0 ~} %{~ if service_port != null ~}
ports = ${jsonencode(port_names)} ports = ["main"]
%{~ endif ~} %{~ endif ~}
%{~ if length(try(args, [])) > 0 ~} %{~ if length(try(args, [])) > 0 ~}
args = ${jsonencode(args)} args = ${jsonencode(args)}
@ -185,14 +150,6 @@ job "${name}" {
%{~ endfor ~} %{~ endfor ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ for action in actions }
action "${action.name}" {
command = "${action.command}"
%{~ if length(action.args) > 0 ~}
args = ${jsonencode(action.args)}
%{~ endif ~}
}
%{~ endfor ~}
%{~ for volume in host_volumes } %{~ for volume in host_volumes }
volume_mount { volume_mount {
volume = "${volume.name}" volume = "${volume.name}"
@ -212,23 +169,12 @@ EOF
%{~ if template.right_delimiter != null } %{~ if template.right_delimiter != null }
right_delimiter = "${template.right_delimiter}" right_delimiter = "${template.right_delimiter}"
%{~ endif ~} %{~ endif ~}
%{~ if template.perms != null }
perms = "${template.perms}"
%{~ endif ~}
%{~ if template.change_mode != null } %{~ if template.change_mode != null }
change_mode = "${template.change_mode}" change_mode = "${template.change_mode}"
%{~ endif ~} %{~ endif ~}
%{~ if template.change_signal != null } %{~ if template.change_signal != null }
change_signal = "${template.change_signal}" change_signal = "${template.change_signal}"
%{~ endif ~} %{~ endif ~}
%{~ if template.change_script != null }
change_script {
command = "${template.change_script.command}"
args = ${jsonencode(template.change_script.args)}
timeout = "${template.change_script.timeout}"
fail_on_error = ${template.change_script.fail_on_error}
}
%{~ endif ~}
%{~ if template.env != null } %{~ if template.env != null }
env = ${template.env} env = ${template.env}
%{~ endif ~} %{~ endif ~}
@ -243,10 +189,9 @@ EOF
%{~ endif ~} %{~ endif ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ if task_identity != null } %{~ if workload_identity_env }
identity { identity {
env = ${task_identity.env} env = true
file = ${task_identity.file}
} }
%{~ endif ~} %{~ endif ~}
} }
@ -263,10 +208,10 @@ EOF
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
] ]
} }
@ -326,10 +271,10 @@ SELECT 'NOOP';
image = "postgres:14" image = "postgres:14"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do echo 'Retry in 10s'; sleep 10; done", "until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do sleep 10; done",
] ]
} }
@ -398,8 +343,8 @@ $$;
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
} }
resources { resources {
@ -410,6 +355,15 @@ $$;
%{~ endif ~} %{~ endif ~}
} }
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template { template {
data = <<EOF data = <<EOF
syslog = no syslog = no


@ -21,6 +21,7 @@ variable "priority" {
description = "Scheduler priority of the service" description = "Scheduler priority of the service"
} }
variable "image" { variable "image" {
type = string type = string
description = "Image that should be run" description = "Image that should be run"
@ -32,27 +33,12 @@ variable "image_pull_timeout" {
description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image" description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image"
} }
variable "user" {
type = string
default = null
description = "User to be passed to the task driver for execution. [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
}
variable "task_meta" { variable "task_meta" {
type = map(string) type = map(string)
default = {} default = {}
description = "Meta attributes to attach to the task" description = "Meta attributes to attach to the task"
} }
variable "task_identity" {
description = "Task workload identity"
type = object({
env = optional(bool, false)
file = optional(bool, false)
})
default = null
}
variable "group_meta" { variable "group_meta" {
type = map(string) type = map(string)
default = {} default = {}
@ -77,19 +63,6 @@ variable "service_port_static" {
description = "Should the port assigned be static" description = "Should the port assigned be static"
} }
variable "ports" {
type = list(object({
name = string
host_network = optional(string)
from = optional(number)
to = optional(number)
static = optional(number)
task_config = optional(bool, false)
}))
default = []
description = "Additional ports (not service_port) to be bound."
}
variable "prometheus" { variable "prometheus" {
type = bool type = bool
default = false default = false
@ -137,7 +110,7 @@ variable "stunnel_resources" {
default = { default = {
cpu = 50 cpu = 50
memory = 15 memory = 50
memory_max = null memory_max = null
} }
@ -168,24 +141,29 @@ variable "service_tags" {
description = "Additional tags to be added to the service." description = "Additional tags to be added to the service."
} }
variable "ports" {
type = list(object({
name = string
host_network = optional(string)
from = optional(number)
to = optional(number)
static = optional(number)
}))
default = []
description = "Additional ports (not service_port) to be bound."
}
variable "templates" { variable "templates" {
type = list(object({ type = list(object({
data = string data = string
dest = string dest = string
dest_prefix = optional(string, "$${NOMAD_TASK_DIR}") dest_prefix = optional(string, "$${NOMAD_TASK_DIR}")
change_mode = optional(string)
change_signal = optional(string)
left_delimiter = optional(string) left_delimiter = optional(string)
right_delimiter = optional(string) right_delimiter = optional(string)
mount = optional(bool, true) mount = optional(bool, true)
env = optional(bool, false) env = optional(bool, false)
perms = optional(string)
change_mode = optional(string)
change_signal = optional(string)
change_script = optional(object({
command = optional(string, "")
args = optional(list(string), [])
timeout = optional(string, "5s")
fail_on_error = optional(bool, false)
}))
})) }))
default = [] default = []
description = "Templates to be used" description = "Templates to be used"
@ -285,38 +263,11 @@ variable "use_wesher" {
default = true default = true
} }
variable "actions" { variable "workload_acl_policy" {
description = "Nomad actions that should be part of the main task"
type = list(object({
name = string
command = string
args = optional(list(string))
cron = optional(string)
}))
default = []
}
variable "service_check" {
description = "Health check for main ingress service"
type = object({ type = object({
name = optional(string, "") name = string
port = optional(string, "") description = string
path = optional(string, "/") rules_hcl = string
interval = optional(string, "30s")
timeout = optional(string, "2s")
type = optional(string, "http")
})
default = {}
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
}) })
default = null default = null


@ -1,66 +0,0 @@
module "sonarr" {
source = "./service"
name = "sonarr"
image = "lscr.io/linuxserver/sonarr:4.0.2"
priority = 55
ingress = true
service_port = 8989
use_wesher = var.use_wesher
ingress_middlewares = [
"authelia@nomad"
]
use_postgres = true
postgres_bootstrap = {
enabled = true
databases = [
"sonarr",
"sonarr-logs",
]
}
env = {
PGID = 100
PUID = 1001
TZ = "America/Los_Angeles"
}
host_volumes = [
{
name = "sonarr-config"
dest = "/config"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
{
name = "media-overflow-write"
dest = "/media-overflow"
read_only = false
},
]
resources = {
cpu = 100
memory = 500
memory_max = 700
}
}
resource "nomad_variable" "authelia_service_rules_sonarr" {
path = "authelia/access_control/service_rules/sonarr"
items = {
name = "sonarr"
rule = <<EOH
policy: bypass
resources:
- '^/api([/?].*)?$'
EOH
}
}
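The nomad_variable above follows the same pattern as radarr's: per-service Authelia bypass rules are stored under authelia/access_control/service_rules/ and, assuming the Authelia config template in core aggregates them, would surface in its access_control section roughly as (domain inferred from the service name, hypothetical):

    - domain: sonarr.thefij.rocks
      policy: bypass
      resources:
        - '^/api([/?].*)?$'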


@ -1,40 +0,0 @@
job "unifi-traffic-route-ips" {
type = "batch"
periodic {
cron = "*/15 * * * * *"
prohibit_overlap = true
}
meta = {
"diun.enable" = false
}
group "main" {
task "main" {
driver = "docker"
config {
image = "iamthefij/unifi-traffic-routes:0.0.4"
}
env = {
UNIFI_HOST = "192.168.2.1",
UNIFI_PORT = "443",
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/unifi-traffic-route-ips" -}}
UNIFI_USER={{ .unifi_username }}
UNIFI_PASS={{ .unifi_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/env"
env = true
}
}
}
}


@ -1,3 +0,0 @@
resource "nomad_job" "unifi-traffic-route-ips" {
jobspec = file("${path.module}/unifi-traffic-route-ips.nomad")
}


@ -1,35 +0,0 @@
module "ytdl-web" {
source = "./service"
name = "ytdl-web"
image = "iamthefij/ytdl-web:0.1.4"
args = ["poetry", "run", "python", "-m", "ytdl_web.web", "--downloader"]
ingress = true
service_port = 5000
use_wesher = var.use_wesher
# service_check = null
user = "1001:100"
env = {
QUEUE_DIR = "/data/queue"
OUTPUT_TMPL = "/media/RomeTube/%(uploader)s%(channel)s/%(title)s.%(ext)s"
}
resources = {
cpu = 50
memory = 150
}
host_volumes = [
{
name = "ytdl-web"
dest = "/data"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
]
}
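OUTPUT_TMPL above is a youtube-dl/yt-dlp output template: %(uploader)s, %(channel)s, %(title)s, and %(ext)s expand to video metadata fields at download time, so a completed download would land at a path shaped like (channel and title below are hypothetical):

    /media/RomeTube/SomeChannel/Some Video Title.mkv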