Compare commits


1 Commit

Commit d1884e2715 (2024-04-27 20:10:23 -07:00)

Update Ansible inventory to split node roles

Splits servers and clients into their own groups so that plays can target
specific roles.

Previously, everything was "both", but I want to add another server for
recovery purposes without hosting containers on it.
46 changed files with 175 additions and 1019 deletions
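
For context, a minimal sketch of the split inventory this change moves toward, assuming only the hostnames and host vars visible in the diff below (the real inventory carries more per-host configuration). A host listed under both groups still resolves to the "both" role via the nomad_node_role Jinja expression further down, so plays can target nomad_servers or nomad_clients independently:

# Sketch only: hostnames and ansible_host are taken from the diff below.
nomad_servers:
  hosts:
    nonopi.thefij:
      ansible_host: 192.168.2.170
    n1.thefij: {}
    pi4: {}
nomad_clients:
  hosts:
    n1.thefij: {}
    pi4: {}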


@ -132,7 +132,7 @@
"filename": "core/authelia.yml", "filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f", "hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false, "is_verified": false,
"line_number": 201, "line_number": 189,
"is_secret": false "is_secret": false
} }
], ],
@ -187,5 +187,5 @@
} }
] ]
}, },
"generated_at": "2024-08-30T18:12:43Z" "generated_at": "2024-02-20T18:04:29Z"
} }


@ -1,6 +1,6 @@
resource "nomad_acl_policy" "anon_policy" { resource "nomad_acl_policy" "anon_policy" {
name = "anonymous" name = "anonymous"
description = "Anon read only" description = "Anon RO"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl") rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
} }


@ -24,20 +24,20 @@ all:
group: "999" group: "999"
mode: "0755" mode: "0755"
read_only: false read_only: false
# n2.thefij: n2.thefij:
# nomad_node_class: ingress nomad_node_class: ingress
# nomad_reserved_memory: 1024 nomad_reserved_memory: 1024
# nfs_mounts: nfs_mounts:
# - src: 10.50.250.2:/srv/volumes - src: 10.50.250.2:/srv/volumes
# path: /srv/volumes/moxy path: /srv/volumes/moxy
# opts: proto=tcp,rw opts: proto=tcp,rw
# nomad_unique_host_volumes: nomad_unique_host_volumes:
# - name: nextcloud-data - name: nextcloud-data
# path: /srv/volumes/nextcloud path: /srv/volumes/nextcloud
# owner: "root" owner: "root"
# group: "bin" group: "bin"
# mode: "0755" mode: "0755"
# read_only: false read_only: false
pi4: pi4:
nomad_node_class: ingress nomad_node_class: ingress
nomad_reserved_memory: 512 nomad_reserved_memory: 512
@ -58,15 +58,12 @@ nomad_instances:
nomad_clients: {} nomad_clients: {}
nomad_servers: nomad_servers:
hosts: hosts:
nonopi.thefij: nonopi.thefij: {}
ansible_host: 192.168.2.170 children:
n1.thefij: {} all: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}
nomad_clients: nomad_clients:
hosts: hosts:
n1.thefij: {} n1.thefij: {}
# n2.thefij: {} n2.thefij: {}
pi4: {} pi4: {}
# qnomad.thefij: {} qnomad.thefij: {}


@ -56,10 +56,6 @@
path: /srv/volumes/media-write path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Overflow
path: /srv/volumes/nas-overflow
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos - src: 192.168.2.10:/Photos
path: /srv/volumes/photos path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw opts: proto=tcp,port=2049,rw
@ -101,12 +97,6 @@
group: "root" group: "root"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: media-overflow-write
path: /srv/volumes/nas-overflow/Media
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads - name: media-downloads
path: /srv/volumes/media-write/Downloads path: /srv/volumes/media-write/Downloads
read_only: false read_only: false
@ -137,9 +127,6 @@
- name: gitea-data - name: gitea-data
path: /srv/volumes/nas-container/gitea path: /srv/volumes/nas-container/gitea
read_only: false read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: all-volumes - name: all-volumes
path: /srv/volumes path: /srv/volumes
owner: "root" owner: "root"
@ -150,10 +137,10 @@
roles: roles:
- name: ansible-nomad - name: ansible-nomad
vars: vars:
nomad_version: "1.8.4-1" nomad_version: "1.7.6-1"
nomad_install_upgrade: true nomad_install_upgrade: true
nomad_allow_purge_config: true nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}" nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}
# Where nomad gets installed to # Where nomad gets installed to
nomad_bin_dir: /usr/bin nomad_bin_dir: /usr/bin
@ -308,7 +295,7 @@
- acl - acl
- policy - policy
- apply - apply
- -description=Anon read only - -description="Anon read only"
- anonymous - anonymous
- /tmp/anonymous.policy.hcl - /tmp/anonymous.policy.hcl
environment: environment:


@ -116,7 +116,6 @@ nomad/jobs/photoprism:
db_name: VALUE db_name: VALUE
db_pass: VALUE db_pass: VALUE
db_user: VALUE db_user: VALUE
oidc_secret: VALUE
nomad/jobs/postgres-server: nomad/jobs/postgres-server:
superuser: VALUE superuser: VALUE
superuser_pass: VALUE superuser_pass: VALUE


@ -190,7 +190,7 @@ job "Dummy" {
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }


@ -8,7 +8,7 @@ resource "nomad_job" "backup" {
resource "nomad_job" "backup-oneoff" { resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically # TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "pi4"]) for_each = toset(["n1", "n2", "pi4"])
# for_each = toset([ # for_each = toset([
# for node in data.consul_service.nomad.service : # for node in data.consul_service.nomad.service :
# node.node_name # node.node_name
@ -24,7 +24,7 @@ resource "nomad_job" "backup-oneoff" {
locals { locals {
# NOTE: This can't be dynamic in first deploy since these values are not known # NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]])) # all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"]) all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-n2", "backup-oneoff-pi4"])
} }
resource "nomad_acl_policy" "secrets_mysql" { resource "nomad_acl_policy" "secrets_mysql" {


@ -114,9 +114,6 @@ namespace "default" {
path "authelia/*" { path "authelia/*" {
capabilities = ["read"] capabilities = ["read"]
} }
path "secrets/authelia/*" {
capabilities = ["read"]
}
} }
} }
EOH EOH
@ -145,22 +142,6 @@ EOH
} }
} }
# Enable oidc for nomad clients
module "nomad_oidc_client" {
source = "./oidc_client"
name = "nomad"
oidc_client_config = {
description = "Nomad"
authorization_policy = "two_factor"
redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
scopes = ["openid", "groups"]
}
}
resource "nomad_acl_auth_method" "nomad_authelia" { resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia" name = "authelia"
type = "OIDC" type = "OIDC"
@ -170,8 +151,8 @@ resource "nomad_acl_auth_method" "nomad_authelia" {
config { config {
oidc_discovery_url = "https://authelia.${var.base_hostname}" oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = module.nomad_oidc_client.client_id oidc_client_id = "nomad"
oidc_client_secret = module.nomad_oidc_client.secret oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
bound_audiences = ["nomad"] bound_audiences = ["nomad"]
oidc_scopes = [ oidc_scopes = [
"groups", "groups",


@ -151,18 +151,6 @@ access_control:
networks: 192.168.5.0/24 networks: 192.168.5.0/24
rules: rules:
## Allow favicons on internal network
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
resources:
- '^/apple-touch-icon-precomposed\.png$'
- '^/assets/safari-pinned-tab\.svg$'
- '^/apple-touch-icon-180x180\.png$'
- '^/apple-touch-icon\.png$'
- '^/favicon\.ico$'
networks:
- internal
policy: bypass
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }} {{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}' - domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }} {{ .rule.Value | indent 6 }}
@ -235,7 +223,7 @@ storage:
## The available providers are: filesystem, smtp. You must use only one of these providers. ## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier: notifier:
## You can disable the notifier startup check by setting this to true. ## You can disable the notifier startup check by setting this to true.
disable_startup_check: true disable_startup_check: false
{{ with nomadVar "secrets/smtp" }} {{ with nomadVar "secrets/smtp" }}
smtp: smtp:
@ -261,18 +249,4 @@ identity_providers:
# hmac_secret: <file> # hmac_secret: <file>
# issuer_private_key: <file> # issuer_private_key: <file>
clients: clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }}
{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
{{ "-" | indent 6 }}
{{ with nomadVar .Path }}
{{- $im := .ItemsMap -}}
{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
{{- end -}}
{{ $im | toYAML | indent 8 }}
{{ end }}
{{ end }}


@ -1,23 +1,20 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" { job "blocky" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 100 priority = 100
constraint {
distinct_hosts = true
}
update { update {
max_parallel = 1 max_parallel = 1
auto_revert = true # TODO: maybe switch to service job from system so we can use canary and autorollback
min_healthy_time = "60s" # auto_revert = true
healthy_deadline = "5m"
} }
group "blocky" { group "blocky" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network { network {
mode = "bridge" mode = "bridge"
@ -65,11 +62,6 @@ job "blocky" {
path = "/" path = "/"
interval = "10s" interval = "10s"
timeout = "3s" timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
} }
} }
@ -77,21 +69,11 @@ job "blocky" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/0xerr0r/blocky:v0.24" image = "ghcr.io/0xerr0r/blocky:v0.22"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"] args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"] ports = ["dns", "api"]
} }
action "refresh-lists" {
command = "/app/blocky"
args = ["lists", "refresh"]
}
action "healthcheck" {
command = "/app/blocky"
args = ["healthcheck"]
}
resources { resources {
cpu = 50 cpu = 50
memory = 75 memory = 75
@ -99,9 +81,7 @@ job "blocky" {
} }
template { template {
data = <<EOF data = var.config_data
${file("${module_path}/config.yml")}
EOF
destination = "$${NOMAD_TASK_DIR}/config.yml" destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m" splay = "1m"
@ -127,121 +107,6 @@ EOF
max = "20s" max = "20s"
} }
} }
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .smarttv_regex.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .wemo.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/wemo.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .sonos.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/sonos.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
} }
task "stunnel" { task "stunnel" {
@ -253,7 +118,7 @@ EOF
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }
@ -269,13 +134,6 @@ syslog = no
foreground = yes foreground = yes
delay = yes delay = yes
[dns_server]
# Dummy server to keep stunnel running if no mysql is present
accept = 8053
connect = 127.0.0.1:53
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client] [mysql_client]
client = yes client = yes
@ -321,9 +179,11 @@ EOF
config { config {
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout",
"2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
] ]
} }


@ -1,7 +1,16 @@
locals {
config_data = file("${path.module}/config.yml")
}
resource "nomad_job" "blocky" { resource "nomad_job" "blocky" {
hcl2 {
vars = {
"config_data" = local.config_data,
}
}
jobspec = templatefile("${path.module}/blocky.nomad", { jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher, use_wesher = var.use_wesher,
module_path = path.module,
}) })
} }
@ -57,32 +66,3 @@ EOH
task = "stunnel" task = "stunnel"
} }
} }
resource "nomad_variable" "blocky_lists_terraform" {
path = "blocky_lists/terraform"
items = {
smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
wemo = file("${path.module}/list-wemo.txt")
sonos = file("${path.module}/list-sonos.txt")
}
}
resource "nomad_acl_policy" "blocky_lists" {
name = "blocky-lists"
description = "Give access Blocky lists"
rules_hcl = <<EOH
namespace "default" {
variables {
path "blocky_lists/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "blocky"
}
}


@ -2,9 +2,6 @@ ports:
dns: 53 dns: 53
http: 4000 http: 4000
# I must have ip v6 blocked or something
connectIPVersion: v4
bootstrapDns: bootstrapDns:
- upstream: 1.1.1.1 - upstream: 1.1.1.1
- upstream: 1.0.0.1 - upstream: 1.0.0.1
@ -13,42 +10,40 @@ bootstrapDns:
upstreams: upstreams:
init:
strategy: fast
groups: groups:
default: default:
- https://dns.quad9.net/dns-query - https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net - tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query - https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one - tcp-tls:one.one.one.one
# cloudflare: cloudflare:
# - 1.1.1.1 - 1.1.1.1
# - 1.0.0.1 - 1.0.0.1
# - 2606:4700:4700::1111 - 2606:4700:4700::1111
# - 2606:4700:4700::1001 - 2606:4700:4700::1001
# - https://one.one.one.one/dns-query - https://one.one.one.one/dns-query
# - tcp-tls:one.one.one.one - tcp-tls:one.one.one.one
# quad9: quad9:
# - 9.9.9.9 - 9.9.9.9
# - 149.112.112.112 - 149.112.112.112
# - 2620:fe::fe - 2620:fe::fe
# - 2620:fe::9 - 2620:fe::9
# - https://dns.quad9.net/dns-query - https://dns.quad9.net/dns-query
# - tcp-tls:dns.quad9.net - tcp-tls:dns.quad9.net
# quad9-secured: quad9-secured:
# - 9.9.9.11 - 9.9.9.11
# - 149.112.112.11 - 149.112.112.11
# - 2620:fe::11 - 2620:fe::11
# - 2620:fe::fe:11 - 2620:fe::fe:11
# - https://dns11.quad9.net/dns-query - https://dns11.quad9.net/dns-query
# - tcp-tls:dns11.quad9.net - tcp-tls:dns11.quad9.net
# quad9-unsecured: quad9-unsecured:
# - 9.9.9.10 - 9.9.9.10
# - 149.112.112.10 - 149.112.112.10
# - 2620:fe::10 - 2620:fe::10
# - 2620:fe::fe:10 - 2620:fe::fe:10
# - https://dns10.quad9.net/dns-query - https://dns10.quad9.net/dns-query
# - tcp-tls:dns10.quad9.net - tcp-tls:dns10.quad9.net
conditional: conditional:
fallbackUpstream: false fallbackUpstream: false
@ -78,11 +73,19 @@ blocking:
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt # - https://hosts-file.net/ad_servers.txt
iot: smarttv:
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt - https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
- {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt # - https://perflyst.github.io/PiHoleBlocklist/regex.list
- {{ env "NOMAD_TASK_DIR" }}/wemo.txt wemo:
- {{ env "NOMAD_TASK_DIR" }}/sonos.txt - |
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com
antisocial: antisocial:
- | - |
facebook.com facebook.com
@ -91,20 +94,22 @@ blocking:
twitter.com twitter.com
youtube.com youtube.com
custom: custom:
- {{ env "NOMAD_TASK_DIR" }}/block - https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/block
whiteLists: whiteLists:
ads:
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }}
{{- end }}
custom: custom:
- {{ env "NOMAD_TASK_DIR" }}/allow - https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/allow
clientGroupsBlock: clientGroupsBlock:
default: default:
- ads - ads
- custom - custom
192.168.3.1/24: - smarttv
- ads - wemo
- iot
- custom
customDNS: customDNS:
customTTL: 1h customTTL: 1h
@ -132,6 +137,7 @@ redis:
connectionCooldown: 3s connectionCooldown: 3s
{{ end -}} {{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}} {{ with nomadVar "nomad/jobs/blocky" -}}
queryLog: queryLog:


@ -1,13 +0,0 @@
# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
/(^|\.)samsungrm\.net$/
# /(^|\.)samsungotn\.net$/ # prevents updates
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection


@ -1,2 +0,0 @@
# Block Sonos devices from phoning home and allowing remote access
/(^|\.)sonos\.com$/


@ -1,8 +0,0 @@
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com


@ -1,16 +1,8 @@
job "exporters" { job "exporters" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 55
constraint {
distinct_hosts = true
}
group "promtail" { group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network { network {
mode = "bridge" mode = "bridge"


@ -40,7 +40,7 @@ job "grafana" {
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }
@ -86,10 +86,10 @@ EOF
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
] ]
} }
@ -155,6 +155,7 @@ GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }} GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
{{ if .db_name -}} {{ if .db_name -}}
# Database storage # Database storage
GF_DATABASE_TYPE=mysql GF_DATABASE_TYPE=mysql
@ -166,10 +167,6 @@ GF_DATABASE_PASSWORD={{ .db_pass }}
SLACK_BOT_URL={{ .slack_bot_url }} SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }} SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }} SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
{{ with nomadVar "secrets/authelia/grafana" -}}
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
{{ end -}} {{ end -}}
EOF EOF
env = true env = true


@ -261,7 +261,7 @@ log_queries =
enabled = true enabled = true
name = Authelia name = Authelia
;allow_sign_up = true ;allow_sign_up = true
client_id = from_env client_id = grafana
client_secret = from_env client_secret = from_env
scopes = openid profile email groups scopes = openid profile email groups
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization


@ -29,6 +29,7 @@ job "lego" {
driver = "exec" driver = "exec"
config { config {
# image = "alpine:3"
command = "/bin/bash" command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"] args = ["${NOMAD_TASK_DIR}/start.sh"]
} }


@ -93,27 +93,3 @@ EOH
task = "stunnel" task = "stunnel"
} }
} }
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}


@ -1,50 +0,0 @@
resource "random_password" "oidc_client_id" {
length = 72
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}


@ -1,11 +0,0 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}


@ -1,25 +0,0 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}


@ -37,36 +37,12 @@ job "prometheus" {
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure", "traefik.http.routers.prometheus.entryPoints=websecure",
] ]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
service { service {
name = "pushgateway" name = "pushgateway"
provider = "nomad" provider = "nomad"
port = "pushgateway" port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
} }
task "prometheus" { task "prometheus" {


@ -70,7 +70,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 20
} }
} }
} }
@ -134,7 +134,7 @@ EOF
resources { resources {
cpu = 50 cpu = 50
memory = 50 memory = 10
} }
} }
} }


@ -14,11 +14,9 @@ job "traefik" {
update { update {
max_parallel = 1 max_parallel = 1
canary = 1 # canary = 1
auto_promote = false # auto_promote = true
auto_revert = true auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
} }
group "traefik" { group "traefik" {
@ -90,7 +88,7 @@ job "traefik" {
} }
config { config {
image = "traefik:3.0" image = "traefik:2.10"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"] ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host" network_mode = "host"
@ -114,14 +112,6 @@ job "traefik" {
} }
} }
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
}
template { template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }} # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<" left_delimiter = "<<"
@ -174,7 +164,7 @@ job "traefik" {
exposedByDefault = false exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)" defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint] [providers.nomad.endpoint]
address = "unix:///secrets/api.sock" address = "http://127.0.0.1:4646"
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml" destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
} }
@ -188,25 +178,14 @@ job "traefik" {
service = "nomad" service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }} {{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
[http.routers.{{ .name }}] {{ range $service, $url := . }}
[http.routers.{{ $service }}]
entryPoints = ["websecure"] entryPoints = ["websecure"]
service = "{{ .name }}" service = "{{ $service }}"
rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}" rule = "Host(`{{ $service }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ $name := .name -}}
{{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }} {{ end }}
{{- end }}{{ end }} {{- end }}{{ end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services] [http.services]
[http.services.nomad] [http.services.nomad]
@ -214,12 +193,14 @@ job "traefik" {
[[http.services.nomad.loadBalancer.servers]] [[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646" url = "http://127.0.0.1:4646"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }} {{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
[http.services.{{ .name }}] {{ range $service, $url := . }}
[http.services.{{ .name }}.loadBalancer] [http.services.{{ $service }}]
[[http.services.{{ .name }}.loadBalancer.servers]] [http.services.{{ $service }}.loadBalancer]
url = "{{ .url }}" [[http.services.{{ $service }}.loadBalancer.servers]]
{{- end }}{{ end }} url = "{{ $url }}"
{{ end }}
{{- end }}{{ end }}{{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml" destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop" change_mode = "noop"


@ -21,61 +21,3 @@ EOH
job_id = resource.nomad_job.traefik.id job_id = resource.nomad_job.traefik.id
} }
} }
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}


@ -3,10 +3,6 @@ job "lldap" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "lldap" { group "lldap" {
network { network {
@ -145,7 +141,7 @@ user = "{{ .user }}"
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
@ -199,7 +195,7 @@ SELECT 'NOOP';
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }


@ -3,10 +3,6 @@ job "mysql-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "mysql-server" { group "mysql-server" {
count = 1 count = 1
@ -77,7 +73,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
resources { resources {
cpu = 300 cpu = 300
memory = 1600 memory = 1536
} }
} }
@ -85,7 +81,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"] args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }


@ -3,10 +3,6 @@ job "postgres-server" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "postgres-server" { group "postgres-server" {
count = 1 count = 1
@ -77,8 +73,8 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
resources { resources {
cpu = 500 cpu = 500
memory = 800 memory = 700
memory_max = 1500 memory_max = 1200
} }
} }
@ -86,7 +82,7 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"] args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }


@ -3,10 +3,6 @@ job "redis-${name}" {
type = "service" type = "service"
priority = 80 priority = 80
update {
auto_revert = true
}
group "cache" { group "cache" {
count = 1 count = 1
@ -48,7 +44,7 @@ job "redis-${name}" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"] ports = ["tls"]
} }


@ -1,5 +1,5 @@
pre-commit pre-commit
detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml
ansible ansible
python-nomad python-consul
netaddr hvac


@ -16,13 +16,10 @@ module "diun" {
DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$" DIUN_DEFAULTS_INCLUDETAGS = "^\\d+(\\.\\d+){0,2}$"
# Nomad API # Nomad API
NOMAD_ADDR = "unix:///secrets/api.sock" # TODO: Use socket in $NOMAD_SECRETS_DIR/api.sock when we can assign workload ACLs with Terraform to
# allow read access. Will need to update template to allow passing token by env
NOMAD_ADDR = "http://$${attr.unique.network.ip-address}:4646/"
DIUN_PROVIDERS_NOMAD = true DIUN_PROVIDERS_NOMAD = true
DIUN_PROVIDERS_NOMAD_SECRETID = "$${NOMAD_TOKEN}"
}
task_identity = {
env = true
} }
templates = [ templates = [
@ -39,16 +36,3 @@ module "diun" {
}, },
] ]
} }
resource "nomad_acl_policy" "diun_query_jobs" {
name = "diun-query-jobs"
description = "Allow diun to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = module.diun.job_id
}
}


@ -42,19 +42,10 @@ module "gitea" {
] ]
use_smtp = true use_smtp = true
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
oidc_client_config = {
description = "Gitea"
redirect_uris = [
"https://git.thefij.rocks/user/oauth2/authelia/callback",
]
scopes = ["openid", "email", "profile"]
}
host_volumes = [ host_volumes = [
{ {
name = "gitea-data" name = "gitea-data"
@ -120,49 +111,6 @@ GITEA__mailer__PASSWD={{ .password }}
mount = false mount = false
dest = "env" dest = "env"
dest_prefix = "$${NOMAD_SECRETS_DIR}" dest_prefix = "$${NOMAD_SECRETS_DIR}"
},
{
data = <<EOF
{{ with nomadVar "secrets/authelia/git" -}}
CLIENT_ID={{ .client_id }}
SECRET={{ .secret }}
{{- end }}
EOF
dest = "oauth.env"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
mount = false
change_mode = "script"
change_script = {
command = "/local/bootstrap_auth.sh"
} }
},
{
data = <<EOF
#! /bin/bash
source {{ env "NOMAD_SECRETS_DIR" }}/oauth.env
auth_provider_id=$(su -- git gitea admin auth list | awk '/authelia/ { print $1 }')
if [ -z "$auth_provider_id" ]; then
echo "Creating Authelia OAuth provider"
su -- git gitea admin auth add-oauth \
--name authelia \
--provider openidConnect \
--key "$CLIENT_ID" \
--secret "$SECRET" \
--auto-discover-url https://authelia.thefij.rocks/.well-known/openid-configuration \
--skip-local-2fa
else
echo "Updating Authelia OAuth provider"
su -- git gitea admin auth update-oauth \
--id $auth_provider_id \
--key "$CLIENT_ID" \
--secret "$SECRET"
fi
EOF
dest = "bootstrap_auth.sh"
perms = "777"
change_mode = "noop"
mount = false
},
] ]
} }


@ -1,25 +0,0 @@
module "languagetool" {
source = "./service"
name = "languagetool"
image = "ghcr.io/erikvl87/docker-languagetool/languagetool:4.8"
ingress = true
service_port = 8010
use_wesher = var.use_wesher
env = {
Java_Xmx = "512m"
}
service_check = {
path = "/v2/healthcheck"
}
# Possibility to use a volume over nfs to host n-gram datasets
# https://github.com/Erikvl87/docker-languagetool/pkgs/container/docker-languagetool%2Flanguagetool#using-n-gram-datasets
resources = {
cpu = 100
memory = 512
}
}


@ -21,6 +21,15 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://grafana.thefij.rocks' - 'https://grafana.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
- name: NZBget - name: NZBget
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -36,11 +45,6 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://lidarr.thefij.rocks' - 'https://lidarr.thefij.rocks'
- name: Radarr
command:
- '/app/scripts/curl_ok.sh'
- 'https://radarr.thefij.rocks'
- name: Authelia - name: Authelia
command: command:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
@ -51,20 +55,6 @@ monitors:
- '/app/scripts/curl_ok.sh' - '/app/scripts/curl_ok.sh'
- 'https://photoprism.thefij.rocks' - 'https://photoprism.thefij.rocks'
- name: Prometheus
command:
- '/app/scripts/curl_ok.sh'
- 'https://prometheus.thefij.rocks'
- name: Plex
command:
- 'curl'
- '--silent'
- '--show-error'
- '-o'
- '/dev/null'
- 'http://192.168.2.10:32400'
alerts: alerts:
log: log:
command: command:


@ -7,10 +7,6 @@ job "fixers" {
prohibit_overlap = true prohibit_overlap = true
} }
meta = {
"diun.enable" = false
}
group "orphaned_services" { group "orphaned_services" {
task "orphaned_services" { task "orphaned_services" {
driver = "docker" driver = "docker"
@ -41,7 +37,7 @@ job "fixers" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/nomad-service-fixers:0.1.1" image = "iamthefij/nomad-service-fixers:0.1.0"
command = "/scripts/nomad_missing_services.py" command = "/scripts/nomad_missing_services.py"
args = ["--restart"] args = ["--restart"]
} }


@ -1,8 +1,8 @@
module "photoprism" { module "photoprism_module" {
source = "./service" source = "./service"
name = "photoprism" name = "photoprism"
image = "photoprism/photoprism:240711" image = "photoprism/photoprism:231128"
image_pull_timeout = "10m" image_pull_timeout = "10m"
# constraints = [{ # constraints = [{
# attribute = "$${meta.hw_transcode.type}" # attribute = "$${meta.hw_transcode.type}"
@ -37,21 +37,18 @@ module "photoprism" {
ingress = true ingress = true
service_port = 2342 service_port = 2342
use_wesher = var.use_wesher use_wesher = var.use_wesher
ingress_middlewares = [
"authelia@nomad"
]
mysql_bootstrap = { mysql_bootstrap = {
enabled = true enabled = true
} }
oidc_client_config = {
description = "Photoprism"
redirect_uris = [
"https://photoprism.thefij.rocks/api/v1/oidc/redirect",
]
scopes = ["openid", "email", "profile"]
}
env = { env = {
PHOTOPRISM_DEBUG = true PHOTOPRISM_DEBUG = true
# Make public since we added Authelia at the proxy level
PHOTOPRISM_AUTH_MODE = "public"
# UI # UI
PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App" PHOTOPRISM_SITE_CAPTION = "AI-Powered Photos App"
PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos" PHOTOPRISM_SITE_DESCRIPTION = "Fijolek home photos"
@ -60,7 +57,6 @@ module "photoprism" {
PHOTOPRISM_SPONSOR = "true" PHOTOPRISM_SPONSOR = "true"
# Worker config # Worker config
PHOTOPRISM_WORKERS = 2 PHOTOPRISM_WORKERS = 2
PHOTOPRISM_BACKUP_DATABASE = false
# Paths # Paths
PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library" PHOTOPRISM_ORIGINALS_PATH = "/photoprism-media/Library"
PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import" PHOTOPRISM_IMPORT_PATH = "/photoprism-media/Import"
@ -69,12 +65,6 @@ module "photoprism" {
PHOTOPRISM_UID = 500 PHOTOPRISM_UID = 500
PHOTOPRISM_GID = 100 PHOTOPRISM_GID = 100
PHOTOPRISM_UMASK = 0000 PHOTOPRISM_UMASK = 0000
# OIDC
PHOTOPRISM_OIDC_URI = "https://authelia.thefij.rocks"
PHOTOPRISM_OIDC_PROVIDER = "Authelia"
PHOTOPRISM_OIDC_REGISTER = true
PHOTOPRISM_OIDC_REDIRECT = false
PHOTOPRISM_OIDC_SCOPES = "openid email profile"
} }
templates = [ templates = [
@ -96,10 +86,6 @@ module "photoprism" {
PHOTOPRISM_FFMPEG_ENCODER=intel PHOTOPRISM_FFMPEG_ENCODER=intel
PHOTOPRISM_INIT="intel tensorflow" PHOTOPRISM_INIT="intel tensorflow"
{{- end }} {{- end }}
{{ with nomadVar "secrets/authelia/photoprism" -}}
PHOTOPRISM_OIDC_CLIENT={{ .client_id }}
PHOTOPRISM_OIDC_SECRET={{ .secret }}
{{- end }}
EOF EOF
dest_prefix = "$${NOMAD_SECRETS_DIR}/" dest_prefix = "$${NOMAD_SECRETS_DIR}/"
dest = "env" dest = "env"
@ -107,13 +93,4 @@ module "photoprism" {
mount = false mount = false
}, },
] ]
actions = [
{
name = "import"
command = "photoprism"
args = ["import", "/photoprism-media/Import"]
cron = "@daily"
},
]
} }


@ -8,13 +8,10 @@ resource "nomad_job" "service" {
args = var.args args = var.args
env = var.env env = var.env
task_meta = var.task_meta task_meta = var.task_meta
task_identity = var.task_identity
group_meta = var.group_meta group_meta = var.group_meta
job_meta = var.job_meta job_meta = var.job_meta
constraints = var.constraints constraints = var.constraints
docker_devices = var.docker_devices docker_devices = var.docker_devices
user = var.user
actions = var.actions
service_port = var.service_port service_port = var.service_port
service_port_static = var.service_port_static service_port_static = var.service_port_static
@ -226,53 +223,3 @@ EOH
task = var.name task = var.name
} }
} }
module "oidc_client" {
count = var.oidc_client_config != null ? 1 : 0
source = "../../core/oidc_client"
name = var.name
oidc_client_config = {
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = var.oidc_client_config.redirect_uris
scopes = var.oidc_client_config.scopes
}
job_acl = {
job_id = resource.nomad_job.service.id
group = var.name
task = var.name
}
}
# Action cron jobs
resource "nomad_job" "action_cron" {
for_each = tomap({ for action in var.actions : action.name => action if action.cron != null })
jobspec = templatefile("${path.module}/service_scheduled.nomad", {
name = var.name
action_name = each.value.name
action_cron = each.value.cron
})
}
resource "nomad_acl_policy" "action_cron_workload_policy" {
for_each = resource.nomad_job.action_cron
name = "service-action-${each.value.id}"
description = "Give custom service cron actions access to execute actions."
rules_hcl = <<EOH
namespace "default" {
capabilities = [
"list-jobs",
"read-job",
"alloc-exec",
]
}
EOH
job_acl {
job_id = each.value.id
}
}


@ -1,39 +0,0 @@
job "${name}-${action_name}" {
region = "global"
datacenters = ["dc1"]
type = "batch"
periodic {
cron = "${action_cron}"
}
group "main" {
task "${action_name}" {
driver = "docker"
config {
image = "hashicorp/nomad:$${attr.nomad.version}"
args = [
"job",
"action",
"-job",
"${name}",
"-group",
"${name}",
"-task",
"${name}",
"${action_name}"
]
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
}
}
}


@ -5,10 +5,6 @@ job "${name}" {
type = "service" type = "service"
priority = ${priority} priority = ${priority}
update {
auto_revert = true
}
group "${name}" { group "${name}" {
count = ${count} count = ${count}
%{~ if length(job_meta) > 0 } %{~ if length(job_meta) > 0 }
@ -80,9 +76,6 @@ job "${name}" {
task "${name}" { task "${name}" {
driver = "docker" driver = "docker"
%{~ if user != null }
user = "${user}"
%{~ endif ~}
%{~ if length(task_meta) > 0 } %{~ if length(task_meta) > 0 }
meta = { meta = {
%{ for k, v in task_meta ~} %{ for k, v in task_meta ~}
@ -185,14 +178,6 @@ job "${name}" {
%{~ endfor ~} %{~ endfor ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ for action in actions }
action "${action.name}" {
command = "${action.command}"
%{~ if length(action.args) > 0 ~}
args = ${jsonencode(action.args)}
%{~ endif ~}
}
%{~ endfor ~}
%{~ for volume in host_volumes } %{~ for volume in host_volumes }
volume_mount { volume_mount {
volume = "${volume.name}" volume = "${volume.name}"
@ -212,9 +197,6 @@ EOF
%{~ if template.right_delimiter != null } %{~ if template.right_delimiter != null }
right_delimiter = "${template.right_delimiter}" right_delimiter = "${template.right_delimiter}"
%{~ endif ~} %{~ endif ~}
%{~ if template.perms != null }
perms = "${template.perms}"
%{~ endif ~}
%{~ if template.change_mode != null } %{~ if template.change_mode != null }
change_mode = "${template.change_mode}" change_mode = "${template.change_mode}"
%{~ endif ~} %{~ endif ~}
@ -243,12 +225,6 @@ EOF
%{~ endif ~} %{~ endif ~}
} }
%{~ endif ~} %{~ endif ~}
%{~ if task_identity != null }
identity {
env = ${task_identity.env}
file = ${task_identity.file}
}
%{~ endif ~}
} }
%{~ if mysql_bootstrap != null } %{~ if mysql_bootstrap != null }
task "mysql-bootstrap" { task "mysql-bootstrap" {
@ -263,10 +239,10 @@ EOF
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
] ]
} }
@ -326,10 +302,10 @@ SELECT 'NOOP';
image = "postgres:14" image = "postgres:14"
args = [ args = [
"/usr/bin/timeout", "/usr/bin/timeout",
"20m", "2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do echo 'Retry in 10s'; sleep 10; done", "until /bin/bash $${NOMAD_TASK_DIR}/bootstrap.sh; do sleep 10; done",
] ]
} }
@ -398,7 +374,7 @@ $$;
} }
config { config {
image = "iamthefij/stunnel:1.0.0" image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"] args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
} }


@ -21,6 +21,7 @@ variable "priority" {
description = "Scheduler priority of the service" description = "Scheduler priority of the service"
} }
variable "image" { variable "image" {
type = string type = string
description = "Image that should be run" description = "Image that should be run"
@ -32,27 +33,12 @@ variable "image_pull_timeout" {
description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image" description = "A time duration that controls how long Nomad will wait before cancelling an in-progress pull of the Docker image"
} }
variable "user" {
type = string
default = null
description = "User to be passed to the task driver for execution. [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
}
variable "task_meta" { variable "task_meta" {
type = map(string) type = map(string)
default = {} default = {}
description = "Meta attributes to attach to the task" description = "Meta attributes to attach to the task"
} }
variable "task_identity" {
description = "Task workload identity"
type = object({
env = optional(bool, false)
file = optional(bool, false)
})
default = null
}
variable "group_meta" { variable "group_meta" {
type = map(string) type = map(string)
default = {} default = {}
@ -176,7 +162,6 @@ variable "templates" {
right_delimiter = optional(string) right_delimiter = optional(string)
mount = optional(bool, true) mount = optional(bool, true)
env = optional(bool, false) env = optional(bool, false)
perms = optional(string)
change_mode = optional(string) change_mode = optional(string)
change_signal = optional(string) change_signal = optional(string)
change_script = optional(object({ change_script = optional(object({
@ -284,17 +269,6 @@ variable "use_wesher" {
default = true default = true
} }
variable "actions" {
description = "Nomad actions that should be part of the main task"
type = list(object({
name = string
command = string
args = optional(list(string))
cron = optional(string)
}))
default = []
}
variable "service_check" { variable "service_check" {
description = "Health check for main ingress service" description = "Health check for main ingress service"
type = object({ type = object({
@ -308,15 +282,3 @@ variable "service_check" {
default = {} default = {}
} }
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
default = null
}


@ -39,11 +39,6 @@ module "sonarr" {
dest = "/media" dest = "/media"
read_only = false read_only = false
}, },
{
name = "media-overflow-write"
dest = "/media-overflow"
read_only = false
},
] ]
resources = { resources = {


@ -7,17 +7,13 @@ job "unifi-traffic-route-ips" {
prohibit_overlap = true prohibit_overlap = true
} }
meta = {
"diun.enable" = false
}
group "main" { group "main" {
task "main" { task "main" {
driver = "docker" driver = "docker"
config { config {
image = "iamthefij/unifi-traffic-routes:0.0.4" image = "iamthefij/unifi-traffic-routes:0.0.2"
} }
env = { env = {


@ -1,35 +0,0 @@
module "ytdl-web" {
source = "./service"
name = "ytdl-web"
image = "iamthefij/ytdl-web:0.1.4"
args = ["poetry", "run", "python", "-m", "ytdl_web.web", "--downloader"]
ingress = true
service_port = 5000
use_wesher = var.use_wesher
# service_check = null
user = "1001:100"
env = {
QUEUE_DIR = "/data/queue"
OUTPUT_TMPL = "/media/RomeTube/%(uploader)s%(channel)s/%(title)s.%(ext)s"
}
resources = {
cpu = 50
memory = 150
}
host_volumes = [
{
name = "ytdl-web"
dest = "/data"
read_only = false
},
{
name = "media-write"
dest = "/media"
read_only = false
},
]
}