Compare commits

...

56 Commits

SHA1 Message Date
5e0393823d Tell service action scheduler to use task socket 2024-10-02 12:07:37 -07:00
78320a8ea4 Upgrade unifi traffic routes to fix bug 2024-10-02 11:29:24 -07:00
03ce47320c Add scheduled import for photoprism 2024-10-02 11:28:24 -07:00
8641bd50e1 Add ability to add actions and schedule them for services 2024-10-02 11:26:57 -07:00
9d7a8029c1 Add additional ansible dependency 2024-10-02 11:25:34 -07:00
9c2bce3fab Clean up lego 2024-10-02 11:25:19 -07:00
c248edfc52 Spit out log message when retrying db connection on bootstrap 2024-10-02 11:24:58 -07:00
1bc46d957a Upgrade nomad 2024-10-02 11:23:39 -07:00
8866434590 Increase db bootstrap timeouts
Prevents service flapping while database is trying to recover
2024-08-30 11:30:02 -07:00
1c783dbdfe Make sure blocky bootstrap never fails
We want to make sure the blocky task always starts, even if mysql can't be reached
2024-08-30 11:27:28 -07:00
f5a180f019 Add dummy stunnel server to blocky
Hopefully this keeps the stunnel instance from failing if mysql and redis
are both unavailable
2024-08-30 11:13:53 -07:00
98c547ebdf Add authelia bypass for some favicons 2024-08-30 11:12:56 -07:00
fc5bce0757 Clean python requirements 2024-08-30 11:12:07 -07:00
2a58439ab5 Simplify passing blocky config to nomad 2024-08-30 11:09:59 -07:00
79648879ab Use new oidc module for setting up oidc with Authelia 2024-08-29 14:07:49 -07:00
9a76c9efef Upgrade nomad 2024-08-29 13:56:54 -07:00
52b0ec3bb6 Add oidc client module 2024-08-29 13:56:41 -07:00
cf43d32d06 Remove n2 host 2024-08-29 13:51:18 -07:00
03dc79c895 Update unifi-traffic-route 2024-08-27 15:29:24 -07:00
fafbb246ff Add oidc to photoprism 2024-08-27 15:28:37 -07:00
e99c5272cc Bump db mem 2024-08-21 20:03:08 -07:00
64b58230e6 Fix sonos list regex 2024-08-21 20:02:56 -07:00
95ca3f40d0 Use vars for external services 2024-08-21 20:02:18 -07:00
534bad2a03 Update nomad 2024-08-21 19:58:35 -07:00
58c483a051 Add overflow volume for some TV 2024-08-21 19:58:22 -07:00
84d7a68088 Make name of anon policy consistent between ansible and tf 2024-08-21 19:58:21 -07:00
8e8dbc3e65 Clean up iot block lists 2024-07-17 20:08:38 -07:00
77c7c6b36c Disable authelia SMTP check to make sure it starts 2024-07-17 20:06:40 -07:00
505a6b5e8d Bump ytdlw to include deadlock fix 2024-06-27 09:36:57 -07:00
1307864afc Update ytdl to use a known system user 2024-06-26 13:32:54 -07:00
c5743a2578 Add ability to set docker user for services 2024-06-26 13:30:50 -07:00
bd67c60575 Make more things auto-revert if they are broken 2024-06-26 13:29:55 -07:00
3e8c03904d Fix block list for smarttvs in blocky config 2024-06-26 13:29:16 -07:00
408f526239 Remove ipv6 from blocky 2024-06-26 13:28:58 -07:00
c478ba4278 Auto refresh blocky lists when template change 2024-06-26 13:28:45 -07:00
9ee660cb6d Pin stunnel image to speed deployments
This will prevent redownloading the image
2024-06-26 13:27:41 -07:00
2235a00f3b Refactor blocky lists to a new nomad var space to make them easier to manage 2024-06-24 17:04:03 -07:00
1f8014e740 Fix smarttv block lists to fetch from a domain that works
Also hard-codes regex lists because they were formatted for PiHole and
not Blocky.
2024-06-24 13:54:30 -07:00
bc620987b7 Move from Gitea to Nomad Vars for custom block and allow
DNS doesn't route to internal addresses for git.thefij.rocks because
list lookups use bootstrap DNS servers, which don't know about it.
2024-06-24 13:53:34 -07:00
7477cb7227 Upgrade blocky and init fast 2024-06-24 13:53:13 -07:00
6906623fda Add ytdl-web 2024-06-13 16:23:55 -07:00
5547241d11 Upgrade photoprism 2024-06-08 13:40:40 -07:00
81093cedfb Increase memory for syslog jobs
They were getting OOM killed
2024-06-08 13:36:23 -07:00
7b41d29eb8 Add health checks and restarts to prometheus 2024-05-30 15:01:42 -07:00
90b7740343 Move Blocky and Exporters away from system to service jobs
This is because system jobs do not get rescheduled when allocs fail
2024-05-30 11:41:40 -07:00
e88c7c250d Bump nomad to 1.8 2024-05-30 11:40:58 -07:00
ed83ab0382 Remove qnomad due to disk errors 2024-05-30 11:40:28 -07:00
3cfbda7a27 Stop using diun for nomad fixers 2024-05-28 12:18:27 -07:00
85c626c96f Use Nomad task socket from Traefik 2024-05-28 12:00:13 -07:00
634d63c26c Stop diun for traffic routes
This was causing a check for each set of dead tasks
2024-05-28 11:45:30 -07:00
205388f283 Update traefik to v3 using canary 2024-05-28 11:43:46 -07:00
bdfde48bec Add some more monitors to nomad minitor 2024-05-06 14:29:17 -07:00
9af55580e7 Update diun config to read from task socket 2024-05-01 10:18:54 -07:00
b9c35bf18f Add ability to set task identities for service module 2024-05-01 10:18:24 -07:00
e7f740a2d9 Add languagetool server 2024-05-01 09:43:28 -07:00
57efee14e9 Update Ansible inventory to split node roles
Splits servers and clients to their own groups so that plays can target
specific roles.

Prior, everything was "both", but I want to add another server for
recovery purposes but not host containers on it.
2024-05-01 09:40:21 -07:00
48 changed files with 1071 additions and 219 deletions

View File

@ -132,7 +132,7 @@
"filename": "core/authelia.yml", "filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f", "hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false, "is_verified": false,
"line_number": 189, "line_number": 201,
"is_secret": false "is_secret": false
} }
], ],
@ -187,5 +187,5 @@
} }
] ]
}, },
"generated_at": "2024-02-20T18:04:29Z" "generated_at": "2024-08-30T18:12:43Z"
} }

View File

@ -1,6 +1,6 @@
resource "nomad_acl_policy" "anon_policy" { resource "nomad_acl_policy" "anon_policy" {
name = "anonymous" name = "anonymous"
description = "Anon RO" description = "Anon read only"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl") rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
} }

View File

@ -1,62 +1,72 @@
--- ---
all: all:
children: hosts:
servers: n1.thefij:
vars: nomad_node_class: ingress
nomad_network_interface: eth0 nomad_reserved_memory: 1024
hosts: # nomad_meta:
n1.thefij: # hw_transcode.device: /dev/dri
nomad_node_role: both # hw_transcode.type: intel
nomad_reserved_memory: 1024 nfs_mounts:
# nomad_meta: - src: 10.50.250.2:/srv/volumes
# hw_transcode.device: /dev/dri path: /srv/volumes/moxy
# hw_transcode.type: intel opts: proto=tcp,rw
nfs_mounts: nomad_unique_host_volumes:
- src: 10.50.250.2:/srv/volumes - name: mysql-data
path: /srv/volumes/moxy path: /srv/volumes/mysql
opts: proto=tcp,rw owner: "999"
nomad_unique_host_volumes: group: "100"
- name: mysql-data mode: "0755"
path: /srv/volumes/mysql read_only: false
owner: "999" - name: postgres-data
group: "100" path: /srv/volumes/postgres
mode: "0755" owner: "999"
read_only: false group: "999"
- name: postgres-data mode: "0755"
path: /srv/volumes/postgres read_only: false
owner: "999" # n2.thefij:
group: "999" # nomad_node_class: ingress
mode: "0755" # nomad_reserved_memory: 1024
read_only: false # nfs_mounts:
n2.thefij: # - src: 10.50.250.2:/srv/volumes
nomad_node_role: both # path: /srv/volumes/moxy
nomad_node_class: ingress # opts: proto=tcp,rw
nomad_reserved_memory: 1024 # nomad_unique_host_volumes:
nfs_mounts: # - name: nextcloud-data
- src: 10.50.250.2:/srv/volumes # path: /srv/volumes/nextcloud
path: /srv/volumes/moxy # owner: "root"
opts: proto=tcp,rw # group: "bin"
nomad_unique_host_volumes: # mode: "0755"
- name: nextcloud-data # read_only: false
path: /srv/volumes/nextcloud pi4:
owner: "root" nomad_node_class: ingress
group: "bin" nomad_reserved_memory: 512
mode: "0755" nomad_meta:
read_only: false hw_transcode.device: /dev/video11
pi4: hw_transcode.type: raspberry
nomad_node_role: both qnomad.thefij:
nomad_node_class: ingress ansible_host: 192.168.2.234
nomad_reserved_memory: 512 nomad_reserved_memory: 1024
nomad_meta: # This VM uses a non-standard interface
hw_transcode.device: /dev/video11 nomad_network_interface: ens3
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_node_role: both
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances: nomad_instances:
children: vars:
servers: {} nomad_network_interface: eth0
children:
nomad_servers: {}
nomad_clients: {}
nomad_servers:
hosts:
nonopi.thefij:
ansible_host: 192.168.2.170
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}
nomad_clients:
hosts:
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}

View File

@ -14,7 +14,7 @@
state: restarted state: restarted
become: true become: true
- name: Start Dockee - name: Start Docker
systemd: systemd:
name: docker name: docker
state: started state: started

View File

@ -1,6 +1,6 @@
--- ---
- name: Recover Nomad - name: Recover Nomad
hosts: nomad_instances hosts: nomad_servers
any_errors_fatal: true any_errors_fatal: true
tasks: tasks:

View File

@ -14,7 +14,7 @@
line: "nameserver {{ non_nomad_dns }}" line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker - name: Install Docker
hosts: nomad_instances hosts: nomad_clients
become: true become: true
vars: vars:
docker_architecture_map: docker_architecture_map:
@ -44,7 +44,7 @@
# state: present # state: present
- name: Create NFS mounts - name: Create NFS mounts
hosts: nomad_instances hosts: nomad_clients
become: true become: true
vars: vars:
shared_nfs_mounts: shared_nfs_mounts:
@ -56,6 +56,10 @@
path: /srv/volumes/media-write path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Overflow
path: /srv/volumes/nas-overflow
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos - src: 192.168.2.10:/Photos
path: /srv/volumes/photos path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw opts: proto=tcp,port=2049,rw
@ -97,6 +101,12 @@
group: "root" group: "root"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: media-overflow-write
path: /srv/volumes/nas-overflow/Media
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads - name: media-downloads
path: /srv/volumes/media-write/Downloads path: /srv/volumes/media-write/Downloads
read_only: false read_only: false
@ -127,6 +137,9 @@
- name: gitea-data - name: gitea-data
path: /srv/volumes/nas-container/gitea path: /srv/volumes/nas-container/gitea
read_only: false read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: all-volumes - name: all-volumes
path: /srv/volumes path: /srv/volumes
owner: "root" owner: "root"
@ -137,9 +150,10 @@
roles: roles:
- name: ansible-nomad - name: ansible-nomad
vars: vars:
nomad_version: "1.7.6-1" nomad_version: "1.8.4-1"
nomad_install_upgrade: true nomad_install_upgrade: true
nomad_allow_purge_config: true nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to # Where nomad gets installed to
nomad_bin_dir: /usr/bin nomad_bin_dir: /usr/bin
@ -213,7 +227,7 @@
enabled: true enabled: true
- name: Bootstrap Nomad ACLs and scheduler - name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_instances hosts: nomad_servers
tasks: tasks:
- name: Start Nomad - name: Start Nomad
@ -294,7 +308,7 @@
- acl - acl
- policy - policy
- apply - apply
- -description="Anon read only" - -description=Anon read only
- anonymous - anonymous
- /tmp/anonymous.policy.hcl - /tmp/anonymous.policy.hcl
environment: environment:

View File

@ -116,6 +116,7 @@ nomad/jobs/photoprism:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
oidc_secret: VALUE
nomad/jobs/postgres-server: nomad/jobs/postgres-server:
superuser: VALUE superuser: VALUE
superuser_pass: VALUE superuser_pass: VALUE

View File

@ -190,7 +190,7 @@ job "Dummy" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }

View File

@ -8,7 +8,7 @@ resource "nomad_job" "backup" {
resource "nomad_job" "backup-oneoff" { resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically # TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2", "pi4"]) for_each = toset(["n1", "pi4"])
# for_each = toset([ # for_each = toset([
# for node in data.consul_service.nomad.service : # for node in data.consul_service.nomad.service :
# node.node_name # node.node_name
@ -24,7 +24,7 @@ resource "nomad_job" "backup-oneoff" {
locals { locals {
# NOTE: This can't be dynamic in first deploy since these values are not known # NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]])) # all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-n2", "backup-oneoff-pi4"]) all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"])
} }
resource "nomad_acl_policy" "secrets_mysql" { resource "nomad_acl_policy" "secrets_mysql" {

View File

@ -114,6 +114,9 @@ namespace "default" {
path "authelia/*" { path "authelia/*" {
capabilities = ["read"] capabilities = ["read"]
} }
path "secrets/authelia/*" {
capabilities = ["read"]
}
} }
} }
EOH EOH
@ -142,6 +145,22 @@ EOH
} }
} }
# Enable oidc for nomad clients
module "nomad_oidc_client" {
source = "./oidc_client"
name = "nomad"
oidc_client_config = {
description = "Nomad"
authorization_policy = "two_factor"
redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
scopes = ["openid", "groups"]
}
}
resource "nomad_acl_auth_method" "nomad_authelia" { resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia" name = "authelia"
type = "OIDC" type = "OIDC"
@ -151,8 +170,8 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
config { config {
oidc_discovery_url = "https://authelia.${var.base_hostname}" oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = "nomad" oidc_client_id = module.nomad_oidc_client.client_id
oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"] oidc_client_secret = module.nomad_oidc_client.secret
bound_audiences = ["nomad"] bound_audiences = ["nomad"]
oidc_scopes = [ oidc_scopes = [
"groups", "groups",

View File

@ -151,6 +151,18 @@ access_control:
networks: 192.168.5.0/24 networks: 192.168.5.0/24
rules: rules:
## Allow favicons on internal network
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
resources:
- '^/apple-touch-icon-precomposed\.png$'
- '^/assets/safari-pinned-tab\.svg$'
- '^/apple-touch-icon-180x180\.png$'
- '^/apple-touch-icon\.png$'
- '^/favicon\.ico$'
networks:
- internal
policy: bypass
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }} {{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}' - domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }} {{ .rule.Value | indent 6 }}
@ -223,7 +235,7 @@ storage:
## The available providers are: filesystem, smtp. You must use only one of these providers. ## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier: notifier:
## You can disable the notifier startup check by setting this to true. ## You can disable the notifier startup check by setting this to true.
disable_startup_check: false disable_startup_check: true
{{ with nomadVar "secrets/smtp" }} {{ with nomadVar "secrets/smtp" }}
smtp: smtp:
@ -249,4 +261,18 @@ identity_providers:
# hmac_secret: <file> # hmac_secret: <file>
# issuer_private_key: <file> # issuer_private_key: <file>
clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }} clients:
{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
{{ "-" | indent 6 }}
{{ with nomadVar .Path }}
{{- $im := .ItemsMap -}}
{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
{{- end -}}
{{ $im | toYAML | indent 8 }}
{{ end }}
{{ end }}

View File

@ -1,20 +1,23 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" { job "blocky" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "system" type = "service"
priority = 100 priority = 100
constraint {
distinct_hosts = true
}
update { update {
max_parallel = 1 max_parallel = 1
# TODO: maybe switch to service job from system so we can use canary and autorollback auto_revert = true
# auto_revert = true min_healthy_time = "60s"
healthy_deadline = "5m"
} }
group "blocky" { group "blocky" {
# TODO: This must be updated to match the nubmer of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network { network {
mode = "bridge" mode = "bridge"
@ -62,6 +65,11 @@ job "blocky" {
path = "/" path = "/"
interval = "10s" interval = "10s"
timeout = "3s" timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
} }
} }
@ -69,11 +77,21 @@ job "blocky" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/0xerr0r/blocky:v0.22" image = "ghcr.io/0xerr0r/blocky:v0.24"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"] args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"] ports = ["dns", "api"]
} }
action "refresh-lists" {
command = "/app/blocky"
args = ["lists", "refresh"]
}
action "healthcheck" {
command = "/app/blocky"
args = ["healthcheck"]
}
resources { resources {
cpu = 50 cpu = 50
memory = 75 memory = 75
@ -81,7 +99,9 @@ job "blocky" {
} }
template { template {
data = var.config_data data = <<EOF
${file("${module_path}/config.yml")}
EOF
destination = "$${NOMAD_TASK_DIR}/config.yml" destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m" splay = "1m"
@ -107,6 +127,121 @@ job "blocky" {
max = "20s" max = "20s"
} }
} }
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .smarttv_regex.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .wemo.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/wemo.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .sonos.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/sonos.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
} }
task "stunnel" { task "stunnel" {
@ -118,7 +253,7 @@ job "blocky" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }
@ -134,6 +269,13 @@ syslog = no
foreground = yes foreground = yes
delay = yes delay = yes
[dns_server]
# Dummy server to keep stunnel running if no mysql is present
accept = 8053
connect = 127.0.0.1:53
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client] [mysql_client]
client = yes client = yes
@ -179,11 +321,9 @@ EOF
config { config {
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout",
"2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true",
] ]
} }

View File

@ -1,16 +1,7 @@
locals {
config_data = file("${path.module}/config.yml")
}
resource "nomad_job" "blocky" { resource "nomad_job" "blocky" {
hcl2 {
vars = {
"config_data" = local.config_data,
}
}
jobspec = templatefile("${path.module}/blocky.nomad", { jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher, use_wesher = var.use_wesher,
module_path = path.module,
}) })
} }
@ -66,3 +57,32 @@ EOH
task = "stunnel" task = "stunnel"
} }
} }
resource "nomad_variable" "blocky_lists_terraform" {
path = "blocky_lists/terraform"
items = {
smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
wemo = file("${path.module}/list-wemo.txt")
sonos = file("${path.module}/list-sonos.txt")
}
}
resource "nomad_acl_policy" "blocky_lists" {
name = "blocky-lists"
description = "Give access Blocky lists"
rules_hcl = <<EOH
namespace "default" {
variables {
path "blocky_lists/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "blocky"
}
}
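
The templates above also read a user-managed variable at blocky_lists/user with block_list and allow_list items, which the blocky-lists ACL policy makes readable by the job. A minimal sketch of seeding it from Terraform, mirroring the blocky_lists_terraform resource (the hostnames here are placeholders, not values from this repo):

resource "nomad_variable" "blocky_lists_user" {
  path = "blocky_lists/user"
  items = {
    # Each value is rendered verbatim into a list file in the task dir,
    # and the change_script triggers `blocky lists refresh` on updates
    block_list = "ads.example.com"
    allow_list = "allowed.example.com"
  }
}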

View File

@ -2,6 +2,9 @@ ports:
dns: 53 dns: 53
http: 4000 http: 4000
# I must have ip v6 blocked or something
connectIPVersion: v4
bootstrapDns: bootstrapDns:
- upstream: 1.1.1.1 - upstream: 1.1.1.1
- upstream: 1.0.0.1 - upstream: 1.0.0.1
@ -10,40 +13,42 @@ bootstrapDns:
upstreams: upstreams:
init:
strategy: fast
groups: groups:
default: default:
- https://dns.quad9.net/dns-query - https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net - tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query - https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one - tcp-tls:one.one.one.one
cloudflare: # cloudflare:
- 1.1.1.1 # - 1.1.1.1
- 1.0.0.1 # - 1.0.0.1
- 2606:4700:4700::1111 # - 2606:4700:4700::1111
- 2606:4700:4700::1001 # - 2606:4700:4700::1001
- https://one.one.one.one/dns-query # - https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one # - tcp-tls:one.one.one.one
quad9: # quad9:
- 9.9.9.9 # - 9.9.9.9
- 149.112.112.112 # - 149.112.112.112
- 2620:fe::fe # - 2620:fe::fe
- 2620:fe::9 # - 2620:fe::9
- https://dns.quad9.net/dns-query # - https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net # - tcp-tls:dns.quad9.net
quad9-secured: # quad9-secured:
- 9.9.9.11 # - 9.9.9.11
- 149.112.112.11 # - 149.112.112.11
- 2620:fe::11 # - 2620:fe::11
- 2620:fe::fe:11 # - 2620:fe::fe:11
- https://dns11.quad9.net/dns-query # - https://dns11.quad9.net/dns-query
- tcp-tls:dns11.quad9.net # - tcp-tls:dns11.quad9.net
quad9-unsecured: # quad9-unsecured:
- 9.9.9.10 # - 9.9.9.10
- 149.112.112.10 # - 149.112.112.10
- 2620:fe::10 # - 2620:fe::10
- 2620:fe::fe:10 # - 2620:fe::fe:10
- https://dns10.quad9.net/dns-query # - https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net # - tcp-tls:dns10.quad9.net
conditional: conditional:
fallbackUpstream: false fallbackUpstream: false
@ -73,19 +78,11 @@ blocking:
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt # - https://hosts-file.net/ad_servers.txt
smarttv: iot:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
# - https://perflyst.github.io/PiHoleBlocklist/regex.list - {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt
wemo: - {{ env "NOMAD_TASK_DIR" }}/wemo.txt
- | - {{ env "NOMAD_TASK_DIR" }}/sonos.txt
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com
antisocial: antisocial:
- | - |
facebook.com facebook.com
@ -94,22 +91,20 @@ blocking:
twitter.com twitter.com
youtube.com youtube.com
custom: custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/block - {{ env "NOMAD_TASK_DIR" }}/block
whiteLists: whiteLists:
ads:
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }}
{{- end }}
custom: custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/allow - {{ env "NOMAD_TASK_DIR" }}/allow
clientGroupsBlock: clientGroupsBlock:
default: default:
- ads - ads
- custom - custom
- smarttv 192.168.3.1/24:
- wemo - ads
- iot
- custom
customDNS: customDNS:
customTTL: 1h customTTL: 1h
@ -137,7 +132,6 @@ redis:
connectionCooldown: 3s connectionCooldown: 3s
{{ end -}} {{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}} {{ with nomadVar "nomad/jobs/blocky" -}}
queryLog: queryLog:

View File

@ -0,0 +1,13 @@
# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
/(^|\.)samsungrm\.net$/
# /(^|\.)samsungotn\.net$/ # prevents updates
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection

View File

@ -0,0 +1,2 @@
# Block Sonos devices from phoning home and allowing remote access
/(^|\.)sonos\.com$/

View File

@ -0,0 +1,8 @@
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com

View File

@ -1,8 +1,16 @@
job "exporters" { job "exporters" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "system" type = "service"
priority = 55
constraint {
distinct_hosts = true
}
group "promtail" { group "promtail" {
# TODO: This must be updated to match the nubmer of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/1202
count = 2
network { network {
mode = "bridge" mode = "bridge"

View File

@ -40,7 +40,7 @@ job "grafana" {
} }
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }
@ -86,10 +86,10 @@ EOF
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"2m", "20m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
] ]
} }
@ -155,7 +155,6 @@ GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }} GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
{{ if .db_name -}} {{ if .db_name -}}
# Database storage # Database storage
GF_DATABASE_TYPE=mysql GF_DATABASE_TYPE=mysql
@ -167,6 +166,10 @@ GF_DATABASE_PASSWORD={{ .db_pass }}
SLACK_BOT_URL={{ .slack_bot_url }} SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }} SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }} SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
{{ with nomadVar "secrets/authelia/grafana" -}}
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
{{ end -}} {{ end -}}
EOF EOF
env = true env = true

View File

@ -261,7 +261,7 @@ log_queries =
enabled = true enabled = true
name = Authelia name = Authelia
;allow_sign_up = true ;allow_sign_up = true
client_id = grafana client_id = from_env
client_secret = from_env client_secret = from_env
scopes = openid profile email groups scopes = openid profile email groups
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization

View File

@ -29,7 +29,6 @@ job "lego" {
driver = "exec" driver = "exec"
config { config {
# image = "alpine:3"
command = "/bin/bash" command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"] args = ["${NOMAD_TASK_DIR}/start.sh"]
} }

View File

@ -93,3 +93,27 @@ EOH
task = "stunnel" task = "stunnel"
} }
} }
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}

View File

@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}

core/oidc_client/main.tf (new file, 50 lines)
View File

@ -0,0 +1,50 @@
resource "random_password" "oidc_client_id" {
length = 72
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}

View File

@ -0,0 +1,11 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}

core/oidc_client/vars.tf (new file, 25 lines)
View File

@ -0,0 +1,25 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}
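
Putting main.tf, outputs.tf, and vars.tf together, a minimal call of this module would look like the sketch below; the name and redirect URI are illustrative, and real call sites appear in core and the service module:

module "example_oidc" {
  source = "./oidc_client"
  name   = "example"

  oidc_client_config = {
    description = "Example service"
    # authorization_policy is optional and defaults to "one_factor"
    redirect_uris = ["https://example.thefij.rocks/oidc/callback"]
    scopes        = ["openid", "groups"]
  }

  # Optional: when set, grants the job read access to secrets/authelia/example
  job_acl = {
    job_id = "example"
  }
}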

View File

@ -37,12 +37,36 @@ job "prometheus" {
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure", "traefik.http.routers.prometheus.entryPoints=websecure",
] ]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
service { service {
name = "pushgateway" name = "pushgateway"
provider = "nomad" provider = "nomad"
port = "pushgateway" port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
task "prometheus" { task "prometheus" {

View File

@ -70,7 +70,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 20 memory = 50
} }
} }
} }
@ -134,7 +134,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 10 memory = 50
} }
} }
} }

View File

@ -14,9 +14,11 @@ job "traefik" {
update { update {
max_parallel = 1 max_parallel = 1
# canary = 1 canary = 1
# auto_promote = true auto_promote = false
auto_revert = true auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
} }
group "traefik" { group "traefik" {
@ -88,7 +90,7 @@ job "traefik" {
} }
config { config {
image = "traefik:2.10" image = "traefik:3.0"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"] ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host" network_mode = "host"
@ -112,6 +114,14 @@ job "traefik" {
} }
} }
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
}
template { template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }} # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<" left_delimiter = "<<"
@ -164,7 +174,7 @@ job "traefik" {
exposedByDefault = false exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)" defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint] [providers.nomad.endpoint]
address = "http://127.0.0.1:4646" address = "unix:///secrets/api.sock"
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml" destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
} }
@ -178,14 +188,25 @@ job "traefik" {
service = "nomad" service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}} {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
{{ range $service, $url := . }} [http.routers.{{ .name }}]
[http.routers.{{ $service }}]
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "{{ $service }}" service = "{{ .name }}"
rule = "Host(`{{ $service }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}"
{{ end }} {{ $name := .name -}}
{{- end }}{{ end }}{{ end }} {{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }}
{{- end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services] [http.services]
[http.services.nomad] [http.services.nomad]
@ -193,14 +214,12 @@ job "traefik" {
[[http.services.nomad.loadBalancer.servers]] [[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646" url = "http://127.0.0.1:4646"
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}} {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
{{ range $service, $url := . }} [http.services.{{ .name }}]
[http.services.{{ $service }}] [http.services.{{ .name }}.loadBalancer]
[http.services.{{ $service }}.loadBalancer] [[http.services.{{ .name }}.loadBalancer.servers]]
[[http.services.{{ $service }}.loadBalancer.servers]] url = "{{ .url }}"
url = "{{ $url }}" {{- end }}{{ end }}
{{ end }}
{{- end }}{{ end }}{{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml" destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop" change_mode = "noop"

View File

@ -21,3 +21,61 @@ EOH
job_id = resource.nomad_job.traefik.id job_id = resource.nomad_job.traefik.id
} }
} }
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}

View File

@ -3,6 +3,10 @@ job "lldap" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "lldap" { group "lldap" {
network { network {
@ -141,7 +145,7 @@ user = "{{ .user }}"
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"2m", "20m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
@ -195,7 +199,7 @@ SELECT 'NOOP';
} }
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }

View File

@ -3,6 +3,10 @@ job "mysql-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "mysql-server" { group "mysql-server" {
count = 1 count = 1
@ -73,7 +77,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
resources { resources {
cpu = 300 cpu = 300
memory = 1536 memory = 1600
} }
} }
@ -81,7 +85,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"] args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }

View File

@ -3,6 +3,10 @@ job "postgres-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "postgres-server" { group "postgres-server" {
count = 1 count = 1
@ -73,8 +77,8 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
resources { resources {
cpu = 500 cpu = 500
memory = 700 memory = 800
memory_max = 1200 memory_max = 1500
} }
} }
@ -82,7 +86,7 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"] args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }

View File

@ -3,6 +3,10 @@ job "redis-${name}" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "cache" { group "cache" {
count = 1 count = 1
@ -44,7 +48,7 @@ job "redis-${name}" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }

View File

@ -1,5 +1,5 @@
pre-commit pre-commit
detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml
ansible ansible
python-consul python-nomad
hvac netaddr

View File

@ -16,10 +16,13 @@ module "diun" {
DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$" DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$"
# Nomad API # Nomad API
# TODO: Use socket in $NOMAD_SECRETS_DIR/api.sock when we can assign workload ACLs with Terraform to NOMAD_ADDR = "unix:///secrets/api.sock"
# allow read access. Will need to update template to allow passing token by env DIUN_PROVIDERS_NOMAD = true
NOMAD_ADDR = "http://$${attr.unique.network.ip-address}:4646/" DIUN_PROVIDERS_NOMAD_SECRETID = "$${NOMAD_TOKEN}"
DIUN_PROVIDERS_NOMAD = true }
task_identity = {
env = true
} }
templates = [ templates = [
@ -36,3 +39,16 @@ module "diun" {
}, },
] ]
} }
resource "nomad_acl_policy" "diun_query_jobs" {
name = "diun-query-jobs"
description = "Allow diun to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = module.diun.job_id
}
}

View File

@ -42,10 +42,19 @@ module "gitea" {
] ]
use_smtp = true use_smtp = true
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
oidc_client_config = {
description = "Gitea"
redirect_uris = [
"https://git.thefij.rocks/user/oauth2/authelia/callback",
]
scopes = ["openid", "email", "profile"]
}
host_volumes = [ host_volumes = [
{ {
name = "gitea-data" name = "gitea-data"
@ -111,6 +120,49 @@ GITEA__mailer__PASSWD={{ .password }}
mount = false mount = false
dest = "env" dest = "env"
dest_prefix = "$${NOMAD_SECRETS_DIR}" dest_prefix = "$${NOMAD_SECRETS_DIR}"
} },
{
data = <<EOF
{{ with nomadVar "secrets/authelia/git" -}}
CLIENT_ID={{ .client_id }}
SECRET={{ .secret }}
{{- end }}
EOF
dest = "oauth.env"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
mount = false
change_mode = "script"
change_script = {
command = "/local/bootstrap_auth.sh"
}
},
{
data = <<EOF
#! /bin/bash
source {{ env "NOMAD_SECRETS_DIR" }}/oauth.env
auth_provider_id=$(su -- git gitea admin auth list | awk '/authelia/ { print $1 }')
if [ -z "$auth_provider_id" ]; then
echo "Creating Authelia OAuth provider"
su -- git gitea admin auth add-oauth \
--name authelia \
--provider openidConnect \
--key "$CLIENT_ID" \
--secret "$SECRET" \
--auto-discover-url https://authelia.thefij.rocks/.well-known/openid-configuration \
--skip-local-2fa
else
echo "Updating Authelia OAuth provider"
su -- git gitea admin auth update-oauth \
--id $auth_provider_id \
--key "$CLIENT_ID" \
--secret "$SECRET"
fi
EOF
dest = "bootstrap_auth.sh"
perms = "777"
change_mode = "noop"
mount = false
},
] ]
} }

services/languagetool.tf (new file, 25 lines)
View File

@ -0,0 +1,25 @@
module "languagetool" {
source = "./service"
name = "languagetool"
image = "ghcr.io/erikvl87/docker-languagetool/languagetool:4.8"
ingress = true
service_port = 8010
use_wesher = var.use_wesher
env = {
Java_Xmx = "512m"
}
service_check = {
path = "/v2/healthcheck"
}
# Possibility to use a volume over nfs to host n-gram datasets
# https://github.com/Erikvl87/docker-languagetool/pkgs/container/docker-languagetool%2Flanguagetool#using-n-gram-datasets
resources = {
cpu = 100
memory = 512
}
}

View File

@ -21,15 +21,6 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://grafana.thefij.rocks' - 'https://grafana.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
- name: NZBget - name: NZBget
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -45,6 +36,11 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://lidarr.thefij.rocks' - 'https://lidarr.thefij.rocks'
- name: Radarr
command:
- '/app/scripts/curl_ok.sh'
- 'https://radarr.thefij.rocks'
- name: Authelia - name: Authelia
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -55,6 +51,20 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://photoprism.thefij.rocks' - 'https://photoprism.thefij.rocks'
- name: Prometheus
command:
- '/app/scripts/curl_ok.sh'
- 'https://prometheus.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
alerts: alerts:
log: log:
command: command:

View File

@ -7,6 +7,10 @@ job "fixers" {
prohibit_overlap = true prohibit_overlap = true
} }
meta = {
"diun.enable" = false
}
group "orphaned_services" { group "orphaned_services" {
task "orphaned_services" { task "orphaned_services" {
driver = "docker" driver = "docker"
@ -37,7 +41,7 @@ job "fixers" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/nomad-service-fixers:0.1.0" image = "iamthefij/nomad-service-fixers:0.1.1"
command = "/scripts/nomad_missing_services.py" command = "/scripts/nomad_missing_services.py"
args = ["--restart"] args = ["--restart"]
} }

View File

@ -1,8 +1,8 @@
module "photoprism_module" { module "photoprism" {
source = "./service" source = "./service"
name = "photoprism" name = "photoprism"
image = "photoprism/photoprism:231128" image = "photoprism/photoprism:240711"
image_pull_timeout = "10m" image_pull_timeout = "10m"
# constraints = [{ # constraints = [{
# attribute = "$${meta.hw_transcode.type}" # attribute = "$${meta.hw_transcode.type}"
@ -37,18 +37,21 @@ module "photoprism_module" {
ingress = true ingress = true
service_port = 2342 service_port = 2342
use_wesher = var.use_wesher use_wesher = var.use_wesher
ingress_middlewares = [
"authelia@nomad"
]
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
oidc_client_config = {
description = "Photoprism"
redirect_uris = [
"https://photoprism.thefij.rocks/api/v1/oidc/redirect",
]
scopes = ["openid", "email", "profile"]
}
env = { env = {
PHOTOPRISM_DEBUG = true PHOTOPRISM_DEBUG = true
# Make public since we added Authelia at the proxy level
PHOTOPRISM_AUTH_MODE = "public"
# UI # UI
PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App" PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App"
PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos" PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos"
@ -56,7 +59,8 @@ module "photoprism_module" {
PHOTOPRISM_SITE_URL = "https://photoprism.${var.base_hostname}/" PHOTOPRISM_SITE_URL = "https://photoprism.${var.base_hostname}/"
PHOTOPRISM_SPONSOR = "true" PHOTOPRISM_SPONSOR = "true"
# Worker config # Worker config
PHOTOPRISM_WORKERS = 2 PHOTOPRISM_WORKERS = 2
PHOTOPRISM_BACKUP_DATABASE = false
# Paths # Paths
PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library" PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library"
PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import" PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import"
@ -65,6 +69,12 @@ module "photoprism_module" {
PHOTOPRISM_UID = 500 PHOTOPRISM_UID = 500
PHOTOPRISM_GID = 100 PHOTOPRISM_GID = 100
PHOTOPRISM_UMASK = 0000 PHOTOPRISM_UMASK = 0000
# OIDC
PHOTOPRISM_OIDC_URI = "https://authelia.thefij.rocks"
PHOTOPRISM_OIDC_PROVIDER = "Authelia"
PHOTOPRISM_OIDC_REGISTER = true
PHOTOPRISM_OIDC_REDIRECT = false
PHOTOPRISM_OIDC_SCOPES = "openid email profile"
} }
templates = [ templates = [
@ -86,6 +96,10 @@ module "photoprism_module" {
PHOTOPRISM_FFMPEG_ENCODER=intel PHOTOPRISM_FFMPEG_ENCODER=intel
PHOTOPRISM_INIT="intel tensorflow" PHOTOPRISM_INIT="intel tensorflow"
{{- end }} {{- end }}
{{ with nomadVar "secrets/authelia/photoprism" -}}
PHOTOPRISM_OIDC_CLIENT={{ .client_id }}
PHOTOPRISM_OIDC_SECRET={{ .secret }}
{{- end }}
EOF EOF
dest_prefix = "$${NOMAD_SECRETS_DIR}/" dest_prefix = "$${NOMAD_SECRETS_DIR}/"
dest = "env" dest = "env"
@ -93,4 +107,13 @@ module "photoprism_module" {
mount = false mount = false
}, },
] ]
actions = [
{
name = "import"
command = "photoprism"
args = ["import", "/photoprism-media/Import"]
cron = "@daily"
},
]
} }

View File

@ -8,10 +8,13 @@ resource "nomad_job" "service" {
args = var.args args = var.args
env = var.env env = var.env
task_meta = var.task_meta task_meta = var.task_meta
task_identity = var.task_identity
group_meta = var.group_meta group_meta = var.group_meta
job_meta = var.job_meta job_meta = var.job_meta
constraints = var.constraints constraints = var.constraints
docker_devices = var.docker_devices docker_devices = var.docker_devices
user = var.user
actions = var.actions
service_port = var.service_port service_port = var.service_port
service_port_static = var.service_port_static service_port_static = var.service_port_static
@ -223,3 +226,53 @@ EOH
task = var.name task = var.name
} }
} }
module "oidc_client" {
count = var.oidc_client_config != null ? 1 : 0
source = "../../core/oidc_client"
name = var.name
oidc_client_config = {
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = var.oidc_client_config.redirect_uris
scopes = var.oidc_client_config.scopes
}
job_acl = {
job_id = resource.nomad_job.service.id
group = var.name
task = var.name
}
}
# Action cron jobs
resource "nomad_job" "action_cron" {
for_each = tomap({ for action in var.actions : action.name => action if action.cron != null })
jobspec = templatefile("${path.module}/service_scheduled.nomad", {
name = var.name
action_name = each.value.name
action_cron = each.value.cron
})
}
resource "nomad_acl_policy" "action_cron_workload_policy" {
for_each = resource.nomad_job.action_cron
name = "service-action-${each.value.id}"
description = "Give custom service cron actions access to execute actions."
rules_hcl = <<EOH
namespace "default" {
capabilities = [
"list-jobs",
"read-job",
"alloc-exec",
]
}
EOH
job_acl {
job_id = each.value.id
}
}

View File

@ -0,0 +1,39 @@
job "${name}-${action_name}" {
region = "global"
datacenters = ["dc1"]
type = "batch"
periodic {
cron = "${action_cron}"
}
group "main" {
task "${action_name}" {
driver = "docker"
config {
image = "hashicorp/nomad:$${attr.nomad.version}"
args = [
"job",
"action",
"-job",
"${name}",
"-group",
"${name}",
"-task",
"${name}",
"${action_name}"
]
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
}
}
}
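
For the photoprism import action declared earlier (cron "@daily"), the templatefile call in the service module renders roughly the following concrete job; a sketch assuming name = "photoprism" and action_name = "import":

job "photoprism-import" {
  region      = "global"
  datacenters = ["dc1"]
  type        = "batch"

  periodic {
    cron = "@daily"
  }

  group "main" {
    task "import" {
      driver = "docker"

      config {
        image = "hashicorp/nomad:${attr.nomad.version}"
        # Equivalent to running:
        #   nomad job action -job photoprism -group photoprism -task photoprism import
        args = ["job", "action", "-job", "photoprism", "-group", "photoprism", "-task", "photoprism", "import"]
      }

      env = {
        # Talk to the local agent over the task API socket
        NOMAD_ADDR = "unix:///secrets/api.sock"
      }

      identity {
        env = true
      }
    }
  }
}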

View File

@ -5,6 +5,10 @@ job "${name}" {
type = "service" type = "service"
priority = ${priority} priority = ${priority}
update {
auto_revert = true
}
group "${name}" { group "${name}" {
count = ${count} count = ${count}
%{~ if length(job_meta) > 0 } %{~ if length(job_meta) > 0 }
@ -76,6 +80,9 @@ job "${name}" {
task "${name}" { task "${name}" {
driver = "docker" driver = "docker"
%{~ if user != null }
user = "${user}"
%{~ endif ~}
%{~ if length(task_meta) > 0 } %{~ if length(task_meta) > 0 }
meta = { meta = {
%{ for k, v in task_meta ~} %{ for k, v in task_meta ~}
@ -178,6 +185,14 @@ job "${name}" {
%{~ endfor ~} %{~ endfor ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ for action in actions }
action "${action.name}" {
command = "${action.command}"
%{~ if length(action.args) > 0 ~}
args = ${jsonencode(action.args)}
%{~ endif ~}
}
%{~ endfor ~}
%{~ for volume in host_volumes } %{~ for volume in host_volumes }
volume_mount { volume_mount {
volume = "${volume.name}" volume = "${volume.name}"
@ -197,6 +212,9 @@ EOF
%{~ if template.right_delimiter != null } %{~ if template.right_delimiter != null }
right_delimiter = "${template.right_delimiter}" right_delimiter = "${template.right_delimiter}"
%{~ endif ~} %{~ endif ~}
%{~ if template.perms != null }
perms = "${template.perms}"
%{~ endif ~}
%{~ if template.change_mode != null } %{~ if template.change_mode != null }
change_mode = "${template.change_mode}" change_mode = "${template.change_mode}"
%{~ endif ~} %{~ endif ~}
@ -225,6 +243,12 @@ EOF
%{~ endif ~} %{~ endif ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ if task_identity != null }
identity {
env = ${task_identity.env}
file = ${task_identity.file}
}
%{~ endif ~}
} }
%{~ if mysql_bootstrap != null } %{~ if mysql_bootstrap != null }
task "mysql-bootstrap" { task "mysql-bootstrap" {
@ -239,10 +263,10 @@ EOF
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"2m", "20m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
] ]
} }
@ -302,10 +326,10 @@ SELECT 'NOOP';
image = "postgres:14" image = "postgres:14"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"2m", "20m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do sleep 10; done", "until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do echo 'Retry in 10s'; sleep 10; done",
] ]
} }
@ -374,7 +398,7 @@ $$;
} }
config { config {
image = "iamthefij/stunnel:latest" image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }

View File

@ -21,7 +21,6 @@ variable "priority" {
description = "Scheduler priority of the service" description = "Scheduler priority of the service"
} }
variable "image" { variable "image" {
type = string type = string
description = "Image that should be run" description = "Image that should be run"
@ -33,12 +32,27 @@ variable "image_pull_timeout" {
description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image" description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image"
} }
variable "user" {
type = string
default = null
description = "User to be passed to the task driver for execution. [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
}
variable "task_meta" { variable "task_meta" {
type = map(string) type = map(string)
default = {} default = {}
description = "Meta attributes to attach to the task" description = "Meta attributes to attach to the task"
} }
variable "task_identity" {
description = "Task workload identity"
type = object({
env = optional(bool, false)
file = optional(bool, false)
})
default = null
}
variable "group_meta" { variable "group_meta" {
type = map(string) type = map(string)
default = {} default = {}
@ -162,6 +176,7 @@ variable "templates" {
right_delimiter = optional(string) right_delimiter = optional(string)
mount = optional(bool, true) mount = optional(bool, true)
env = optional(bool, false) env = optional(bool, false)
perms = optional(string)
change_mode = optional(string) change_mode = optional(string)
change_signal = optional(string) change_signal = optional(string)
change_script = optional(object({ change_script = optional(object({
@ -269,6 +284,17 @@ variable "use_wesher" {
default = true default = true
} }
variable "actions" {
description = "Nomad actions that should be part of the main task"
type = list(object({
name = string
command = string
args = optional(list(string))
cron = optional(string)
}))
default = []
}
variable "service_check" { variable "service_check" {
description = "Health check for main ingress service" description = "Health check for main ingress service"
type = object({ type = object({
@ -282,3 +308,15 @@ variable "service_check" {
default = {} default = {}
} }
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
default = null
}

View File

@ -39,6 +39,11 @@ module "sonarr" {
dest = "/media" dest = "/media"
read_only = false read_only = false
}, },
{
name = "media-overflow-write"
dest = "/media-overflow"
read_only = false
},
] ]
resources = { resources = {

View File

@ -7,13 +7,17 @@ job "unifi-traffic-route-ips" {
prohibit_overlap = true prohibit_overlap = true
} }
meta = {
"diun.enable" = false
}
group "main" { group "main" {
task "main" { task "main" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/unifi-traffic-routes:0.0.2" image = "iamthefij/unifi-traffic-routes:0.0.4"
} }
env = { env = {

services/ytdl.tf (new file, 35 lines)
View File

@ -0,0 +1,35 @@
module "ytdl-web" {
source = "./service"
name = "ytdl-web"
image = "iamthefij/ytdl-web:0.1.4"
args = ["poetry", "run", "python", "-m", "ytdl_web.web", "--downloader"]
ingress = true
service_port = 5000
use_wesher = var.use_wesher
# service_check = null
user = "1001:100"
env = {
QUEUE_DIR = "/data/queue"
OUTPUT_TMPL = "/media/RomeTube/%(uploader)s%(channel)s/%(title)s.%(ext)s"
}
resources = {
cpu = 50
memory = 150
}
host_volumes = [
{
name = "ytdl-web"
dest = "/data"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
]
}