Compare commits


1 Commit

SHA1: 2d6fc3d9ef
Message: WIP: Moving vars and service discovery to Nomad
         Starting with core
Date: 2022-11-20 16:24:00 -08:00
157 changed files with 4485 additions and 6126 deletions

.gitignore (vendored)

@ -49,5 +49,3 @@ vault-keys.json
nomad_bootstrap.json nomad_bootstrap.json
consul_values.yml consul_values.yml
vault_hashi_vault_values.yml vault_hashi_vault_values.yml
vault_*.yml
ansible_playbooks/vars/nomad_vars.yml


@ -18,17 +18,9 @@ repos:
- id: check-added-large-files - id: check-added-large-files
- id: check-merge-conflict - id: check-merge-conflict
- id: end-of-file-fixer - id: end-of-file-fixer
exclude: "^ansible_playbooks/vars/nomad_vars.sample.yml$"
- id: trailing-whitespace - id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets - repo: https://github.com/Yelp/detect-secrets
rev: v1.4.0 rev: v1.4.0
hooks: hooks:
- id: detect-secrets - id: detect-secrets
args: ['--baseline', '.secrets-baseline'] args: ['--baseline', '.secrets-baseline']
- repo: local
hooks:
- id: variable-sample
name: generate variable sample file
language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file]


@ -113,33 +113,41 @@
{ {
"path": "detect_secrets.filters.regex.should_exclude_secret", "path": "detect_secrets.filters.regex.should_exclude_secret",
"pattern": [ "pattern": [
"(\\${.*}|from_env|fake|!secret|VALUE)" "(\\${.*}|from_env|fake|!secret)"
] ]
} }
], ],
"results": { "results": {
"core/authelia.yml": [ "ansible_playbooks/vars/vault_hashi_vault_values.example.yml": [
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/authelia.yml", "filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04", "hashed_secret": "f2baa52d02ca888455ce47823f47bf372d5eecb3",
"is_verified": false, "is_verified": false,
"line_number": 54, "line_number": 8,
"is_secret": false "is_secret": false
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/authelia.yml", "filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f", "hashed_secret": "18960546905b75c869e7de63961dc185f9a0a7c9",
"is_verified": false, "is_verified": false,
"line_number": 189, "line_number": 10,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
"is_verified": false,
"line_number": 22,
"is_secret": false "is_secret": false
} }
], ],
"core/grafana/grafana.ini": [ "core/metrics/grafana/grafana.ini": [
{ {
"type": "Basic Auth Credentials", "type": "Basic Auth Credentials",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false, "is_verified": false,
"line_number": 78, "line_number": 78,
@ -147,7 +155,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd", "hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false, "is_verified": false,
"line_number": 109, "line_number": 109,
@ -155,7 +163,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997", "hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false, "is_verified": false,
"line_number": 151, "line_number": 151,
@ -163,7 +171,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f", "hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false, "is_verified": false,
"line_number": 154, "line_number": 154,
@ -171,7 +179,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3", "hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false, "is_verified": false,
"line_number": 239, "line_number": 239,
@ -179,7 +187,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "core/grafana/grafana.ini", "filename": "core/metrics/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50", "hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false, "is_verified": false,
"line_number": 252, "line_number": 252,
@ -187,5 +195,5 @@
} }
] ]
}, },
"generated_at": "2024-02-20T18:04:29Z" "generated_at": "2022-11-21T00:23:03Z"
} }


@ -1,40 +1,40 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/external" {
version = "2.2.0" version = "2.2.3"
hashes = [ hashes = [
"h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=", "h1:uvOYRWcVIqOZSl8YjjaB18yZFz1AWIt2CnK7O45rckg=",
"zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66", "zh:184ecd339d764de845db0e5b8a9c87893dcd0c9d822167f73658f89d80ec31c9",
"zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff", "zh:2661eaca31d17d6bbb18a8f673bbfe3fe1b9b7326e60d0ceb302017003274e3c",
"zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61", "zh:2c0a180f6d1fc2ba6e03f7dfc5f73b617e45408681f75bca75aa82f3796df0e4",
"zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f", "zh:4b92ae44c6baef4c4952c47be00541055cb5280dd3bc8031dba5a1b2ee982387",
"zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4", "zh:5641694d5daf3893d7ea90be03b6fa575211a08814ffe70998d5adb8b59cdc0a",
"zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2", "zh:5bd55a2be8a1c20d732ac9c604b839e1cadc8c49006315dffa4d709b6874df32",
"zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f", "zh:6e0ef5d11e1597202424b7d69b9da7b881494c9b13a3d4026fc47012dc651c79",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c", "zh:9e19f89fa25004d3b926a8d15ea630b4bde62f1fa4ed5e11a3d27aabddb77353",
"zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6", "zh:b763efdd69fd097616b4a4c89cf333b4cee9699ac6432d73d2756f8335d1213f",
"zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2", "zh:e3b561efdee510b2b445f76a52a902c52bee8e13095e7f4bed7c80f10f8d294a",
"zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1", "zh:fe660bb8781ee043a093b9a20e53069974475dcaa5791a1f45fd03c61a26478a",
] ]
} }
provider "registry.terraform.io/hashicorp/random" { provider "registry.terraform.io/hashicorp/nomad" {
version = "3.6.0" version = "1.4.19"
hashes = [ hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", "h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", "zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", "zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", "zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", "zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", "zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", "zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", "zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
] ]
} }


@ -1,16 +1,8 @@
SLEEP_FOR ?= 10
VENV ?= venv VENV ?= venv
.PHONY: sleep
sleep:
sleep $(SLEEP_FOR)
.PHONY: default .PHONY: default
default: check default: check
.PHONY: all
all: cluster bootstrap-values apply
.PHONY: cluster .PHONY: cluster
cluster: ansible-cluster cluster: ansible-cluster
@ -30,8 +22,8 @@ check: $(VENV)
$(VENV)/bin/pre-commit run --all-files $(VENV)/bin/pre-commit run --all-files
# Creates a new secrets baseline # Creates a new secrets baseline
.secrets-baseline: $(VENV) Makefile .secrets-baseline: $(VENV)
$(VENV)/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret|VALUE)' > .secrets-baseline $(VENV)/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret)' > .secrets-baseline
# Audits secrets against baseline # Audits secrets against baseline
.PHONY: secrets-audit .PHONY: secrets-audit
@ -59,18 +51,16 @@ ansible-cluster: $(VENV) ansible_galaxy
./ansible_playbooks/setup-cluster.yml ./ansible_playbooks/setup-cluster.yml
.PHONY: bootstrap-values .PHONY: bootstrap-values
bootstrap-values: $(VENV) bootstrap-values: $(VENV) ansible_galaxy
env NOMAD_ADDR=http://192.168.2.101:4646 \ env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \
NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \ -e "@vault-keys.json" \
$(VENV)/bin/python ./scripts/nomad_vars.py ./ansible_playbooks/bootstrap-values.yml
.PHONY: recover-nomad .PHONY: unseal-vault
recover-nomad: $(VENV) unseal-vault: $(VENV) ansible_galaxy
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/recover-nomad.yaml env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -K -vv \
-e "@vault-keys.json" \
.PHONY: stop-cluster ./ansible_playbooks/unseal-vault.yml
stop-cluster: $(VENV)
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/stop-cluster.yml
.PHONY: init .PHONY: init
init: init:
@ -80,26 +70,10 @@ init:
plan: plan:
@terraform plan \ @terraform plan \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \ -var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: apply .PHONY: apply
apply: apply:
@terraform apply \ @terraform apply \
-auto-approve \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \ -var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: refresh
refresh:
@terraform refresh \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: destroy
destroy:
@terraform destroy \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: clean
clean:
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \
./ansible_playbooks/clear-data.yml
find -name "*.tfstate" -exec rm '{}' \;
rm -f ./vault-keys.json ./nomad_bootstrap.json


@ -1,46 +0,0 @@
# Homelab Nomad
My configuration for creating my home Nomad cluster and deploying services to it.
This repo is not designed as a set of general-purpose templates, but rather to fit my specific needs. That said, I have made an effort to keep things as useful as possible for anyone wanting to use or modify this.
## Running
make all
## Design
Both Ansible and Terraform are used as part of this configuration. All hosts must be reachable over SSH before any of it is run.
To begin, Ansible runs a playbook to set up the cluster. This includes installing Nomad, bootstrapping the cluster and ACLs, setting up NFS shares, creating Nomad host volumes, and setting up Wesher as a WireGuard mesh between hosts.
After this is complete, Nomad variables must be set so that services can be accessed and configured correctly. These variables are set based on the sample file.
Finally, the Terraform configuration can be applied, setting up all services deployed on the cluster.
The configuration of new services is intended to be as templated as possible and to avoid requiring changes in multiple places. For example, most services are configured with a template that provides reverse proxy, DNS records, database tunnels, database bootstrapping, metrics scraping, and authentication. The only real exception is backups, which for now requires a distinct job file.
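In shell terms, the flow above looks roughly like the following. This is a sketch based on the Makefile targets elsewhere in this diff; the Nomad address, the venv path, and the bootstrap file names all come from that Makefile and may differ for your cluster.

    # Roughly what `make all` drives (cluster, bootstrap-values, apply):
    make cluster                                      # runs ansible_playbooks/setup-cluster.yml
    export NOMAD_ADDR=http://192.168.2.101:4646
    export NOMAD_TOKEN=$(jq -r .SecretID nomad_bootstrap.json)
    venv/bin/python ./scripts/nomad_vars.py           # pushes values from vars/nomad_vars.yml (assumed)
    make apply                                        # terraform apply for all services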
## What does it do?
* Nomad cluster for scheduling and configuring all services
* Blocky DNS servers with integrated ad blocking. This also provides service discovery
* Prometheus with autodiscovery of service metrics
* Loki and Promtail aggregating logs
* Minitor for service availability checks
* Grafana providing dashboards, alerting, and log searching
* Photoprism for photo management
* Remote and shared volumes over NFS
* Authelia for OIDC and Proxy based authentication with 2FA
* Sonarr and Lidarr for multimedia management
* Automated block based backups using Restic
## Step by step
1. Update hosts in `ansible_playbooks/ansible_hosts.yml`
2. Update `ansible_playbooks/setup-cluster.yml`
    1. Update backup DNS server
    2. Update NFS shares from NAS
    3. Update volumes to make sure they are valid paths
3. Create `ansible_playbooks/vars/nomad_vars.yml` based on the sample file (see the sketch after this list). TODO: This is quite specific and probably impossible without more documentation
4. Run `make all`
5. Update your network DNS settings to use the new servers' IP addresses
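A minimal sketch for step 3; it assumes the sample file is the one generated by the `variable-sample` pre-commit hook shown on one side of this diff, and that every `VALUE` placeholder gets filled in by hand.

    cp ansible_playbooks/vars/nomad_vars.sample.yml ansible_playbooks/vars/nomad_vars.yml
    "$EDITOR" ansible_playbooks/vars/nomad_vars.yml   # replace the VALUE placeholders
    make all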


@ -1,21 +1,59 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/consul" {
version = "1.4.20" version = "2.15.1"
hashes = [ hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=", "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c", "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba", "zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab", "zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954", "zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0", "zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65", "zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684", "zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613", "zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce", "zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf", "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23", "zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.7.0"
hashes = [
"h1:idawLPCbZgHIb+NRLJs4YdIcQgACqYiT5VwQfChkn+w=",
"zh:256b82692c560c76ad51414a2c003cadfa10338a9df333dbe22dd14a9ed16f95",
"zh:329ed8135a98bd6a000d014e40bc5981c6868cf50eedf454f1a1f72ac463bdf0",
"zh:3b32c18b492a6ac8e1ccac40d28cd42a88892ef8f3515291676136e3faac351c",
"zh:4c5ea8e80543b36b1999257a41c8b9cde852542251de82a94cff2f9d280ac2ec",
"zh:5d968ed305cde7aa3567a943cb2f5f8def54b40a2292b66027b1405a1cf28585",
"zh:60226d1a0a496a9a6c1d646800dd7e1bd1c4f5527e7307ff0bca9f4d0b5395e2",
"zh:71b11def501c994ee5305f24bd47ebfcca2314c5acca3efcdd209373d0068ac0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:89be6b5db3be473bfd14422a9abf83245c4b22ce47a8fe463bbebf8e20958ab1",
"zh:8f91051d43ae309bb8f3f6a9659f0fd26b1b239faf671c139b4e9ad0d208db05",
"zh:b5114983273d3170878f657b92738b2c40953aedeef2e1840588ecaf1bc0827e",
"zh:fd56db01c5444dc8ca2e0ad2f13fc4c17735d0fdeb5960e23176fb3f5a5114d3",
] ]
} }


@ -1,12 +1,6 @@
namespace "*" { namespace "*" {
policy = "write" policy = "write"
capabilities = ["alloc-node-exec"] capabilities = ["alloc-node-exec"]
variables {
path "*" {
capabilities = ["write", "read", "destroy"]
}
}
} }
agent { agent {


@ -1,17 +0,0 @@
resource "nomad_acl_role" "admin" {
name = "admin"
description = "Nomad administrators"
policy {
name = nomad_acl_policy.admin.name
}
}
resource "nomad_acl_role" "deploy" {
name = "deploy"
description = "Authorized to conduct deployments and view logs"
policy {
name = nomad_acl_policy.deploy.name
}
}

acls/nomad_vault.tf (new file, 48 lines)

@ -0,0 +1,48 @@
# Set up nomad provider in vault for Nomad ACLs
resource "nomad_acl_token" "vault" {
name = "vault"
type = "management"
}
resource "vault_nomad_secret_backend" "config" {
backend = "nomad"
description = "Nomad ACL"
token = nomad_acl_token.vault.secret_id
default_lease_ttl_seconds = "3600"
max_lease_ttl_seconds = "7200"
ttl = "3600"
max_ttl = "7200"
}
# Vault roles generating Nomad tokens
resource "vault_nomad_secret_role" "nomad-deploy" {
backend = vault_nomad_secret_backend.config.backend
role = "nomad-deploy"
# Nomad policies
policies = ["deploy"]
}
resource "vault_nomad_secret_role" "admin-management" {
backend = vault_nomad_secret_backend.config.backend
role = "admin-management"
type = "management"
}
resource "vault_nomad_secret_role" "admin" {
backend = vault_nomad_secret_backend.config.backend
role = "admin"
# Nomad policies
policies = ["admin"]
}
# Nomad Vault token access
resource "vault_token_auth_backend_role" "nomad-cluster" {
role_name = "nomad-cluster"
token_explicit_max_ttl = 0
allowed_policies = ["access-tables", "nomad-task"]
orphan = true
token_period = 259200
renewable = true
}
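Once this is applied, short-lived Nomad tokens can be requested from Vault through the roles above. A minimal sketch, assuming the backend stays mounted at `nomad` as configured:

    vault read nomad/creds/nomad-deploy   # returns a Nomad token with the "deploy" policy
    vault read nomad/creds/admin          # returns a Nomad token with the "admin" policy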

acls/nomad_vault_db.tf (new file, 17 lines)

@ -0,0 +1,17 @@
# resource "vault_mount" "db" {
# path = "database"
# type = "database"
# }
#
# resource "vault_database_secret_backend_connection" "mysql" {
# backend = vault_mount.db.path
# name = "mysql"
# allowed_roles = ["accessdb"]
#
# mysql {
# # How to give access here?
# connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
# username = ""
# password = ""
# }
# }


@ -1,6 +1,38 @@
# Configure Consul provider
provider "consul" {
address = var.consul_address
}
# Get Nomad client from Consul
data "consul_service" "nomad" {
name = "nomad-client"
}
# Get Vault client from Consul
data "consul_service" "vault" {
name = "vault"
tag = "active"
}
locals {
# Get Nomad address from Consul
nomad_node = data.consul_service.nomad.service[0]
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
# Get Vault address from Consul
vault_node = data.consul_service.vault.service[0]
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
# Configure the Nomad provider # Configure the Nomad provider
provider "nomad" { provider "nomad" {
address = var.nomad_address address = local.nomad_node_address
secret_id = var.nomad_secret_id secret_id = var.nomad_secret_id
region = "global" region = "global"
} }
# Configure the Vault provider
provider "vault" {
address = local.vault_node_address
token = var.vault_token
}


@ -1,3 +1,8 @@
variable "consul_address" {
type = string
default = "http://n1.thefij:8500"
}
variable "nomad_secret_id" { variable "nomad_secret_id" {
type = string type = string
description = "Secret ID for ACL bootstrapped Nomad" description = "Secret ID for ACL bootstrapped Nomad"
@ -5,7 +10,8 @@ variable "nomad_secret_id" {
default = "" default = ""
} }
variable "nomad_address" { variable "vault_token" {
type = string type = string
default = "http://n1.thefij:4646" sensitive = true
default = ""
} }
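These variables line up with the Makefile's plan and apply targets; a typical invocation (a sketch, with values pulled from the bootstrap artifacts named there) looks like:

    terraform plan \
      -var "nomad_secret_id=$(jq -r .SecretID nomad_bootstrap.json)" \
      -var "vault_token=$(jq -r .root_token vault-keys.json)"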

acls/vault_login.tf (new file, 8 lines)

@ -0,0 +1,8 @@
resource "vault_auth_backend" "userpass" {
type = "userpass"
tune {
max_lease_ttl = "1h"
listing_visibility = "unauth"
}
}
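For reference, a login against this backend looks like the sketch below. The `admin` user itself is created by the bootstrap playbook's `vault_userpass` entries, not by this resource.

    vault login -method=userpass username=admin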

acls/vault_policies.tf (new file, 83 lines)

@ -0,0 +1,83 @@
resource "vault_policy" "admin" {
name = "admin"
policy = <<EOF
path "*" {
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
EOF
}
resource "vault_policy" "nomad-deploy" {
name = "nomad-deploy"
policy = <<EOH
path "nomad/creds/nomad-deploy" {
capabilities = ["read"]
}
EOH
}
# Policy for clusters
resource "vault_policy" "nomad-task" {
name = "nomad-task"
policy = <<EOH
path "kv/data/*" {
# Does this need create, update, delete?
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
# Policy for nomad tokens
resource "vault_policy" "nomad-server" {
name = "nomad-server"
policy = <<EOH
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up the token passed to Nomad to validate that the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
# This section grants all access on "secret/*". Further restrictions can be
# applied to this broad policy, as shown below.
path "kv/data/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
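A sketch of how this policy is typically consumed: a periodic orphan token is created for the Nomad servers, matching the 72h period configured on the `nomad-cluster` token role earlier. The flags follow the stock Nomad/Vault integration guide rather than anything committed in this repo.

    vault token create -policy nomad-server -period 72h -orphan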


@ -1,72 +1,65 @@
--- ---
all: all:
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances:
vars:
nomad_network_interface: eth0
children: children:
nomad_servers: {} servers:
nomad_clients: {} hosts:
nomad_servers: n1.thefij:
hosts: nomad_node_role: both
nonopi.thefij: nomad_unique_host_volumes:
ansible_host: 192.168.2.170 - name: mysql-data
n1.thefij: {} path: /srv/volumes/mysql
n2.thefij: {} owner: "999"
pi4: {} group: "100"
# qnomad.thefij: {} mode: "0755"
nomad_clients: read_only: false
hosts: - name: lldap-data
n1.thefij: {} path: /srv/volumes/lldap
n2.thefij: {} owner: "root"
pi4: {} group: "bin"
# qnomad.thefij: {} mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_node_role: both
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: gitea-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: sonarr-data
path: /srv/volumes/sonarr
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: nzbget-data
path: /srv/volumes/nzbget
owner: "root"
group: "bin"
mode: "0755"
read_only: false
# n3.thefij:
# nomad_node_class: ingress
# nomad_node_role: both
# pi3:
# nomad_node_role: client
pi4:
nomad_node_role: both
consul_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}


@ -0,0 +1,80 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- vars/consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap Vault values
hosts: vault_instances
gather_facts: false
vars_files:
- vars/vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
- name: Check mount
community.hashi_vault.vault_read:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 10
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"
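Per the Makefile in this diff, this playbook is driven by the `bootstrap-values` target, roughly:

    env VIRTUAL_ENV=venv venv/bin/ansible-playbook -vv \
        -e "@vault-keys.json" \
        ./ansible_playbooks/bootstrap-values.yml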


@ -1,5 +1,27 @@
# Stops Nomad and clears all data from its data dirs # Stops Consul, Vault, and Nomad and clears all data from their data dirs
--- ---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: vault
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data - name: Delete Nomad data
hosts: nomad_instances hosts: nomad_instances


@ -1,27 +0,0 @@
- name: Restart nomad and wesher
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Restart wesher
systemd:
name: wesher
state: restarted
become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad
systemd:
name: nomad
state: started
become: true


@ -0,0 +1,84 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Stop Vault
hosts: nomad_instances
tasks:
- name: Stop Vault
systemd:
name: vault
state: stopped
become: true
- name: Recover Consul
hosts: consul_instances
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true
- name: Get node-id
slurp:
src: /opt/consul/node-id
register: consul_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ consul_node_id.content }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: "/opt/consul/raft/peers.json"
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].consul_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Consul
systemd:
name: consul
state: restarted
become: true
# - name: Start Vault
# hosts: nomad_instances
#
# tasks:
# - name: Start Vault
# systemd:
# name: vault
# state: started
# become: true
#
# - name: Start Nomad
# hosts: nomad_instances
#
# tasks:
# - name: Start Nomad
# systemd:
# name: nomad
# state: started
# become: true


@ -1,7 +1,6 @@
--- ---
- name: Recover Nomad - name: Recover Nomad
hosts: nomad_servers hosts: nomad_instances
any_errors_fatal: true
tasks: tasks:
- name: Stop Nomad - name: Stop Nomad
@ -10,10 +9,6 @@
state: stopped state: stopped
become: true become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id - name: Get node-id
slurp: slurp:
src: /var/nomad/server/node-id src: /var/nomad/server/node-id
@ -23,7 +18,7 @@
- name: Node Info - name: Node Info
debug: debug:
msg: | msg: |
node_id: {{ nomad_node_id.content | b64decode }} node_id: {{ nomad_node_id.content }}
address: {{ ansible_default_ipv4.address }} address: {{ ansible_default_ipv4.address }}
- name: Save - name: Save
@ -34,7 +29,7 @@
[ [
{% for host in ansible_play_hosts -%} {% for host in ansible_play_hosts -%}
{ {
"id": "{{ hostvars[host].nomad_node_id.content | b64decode }}", "id": "{{ hostvars[host].nomad_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647", "address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false "non_voter": false
}{% if not loop.last %},{% endif %} }{% if not loop.last %},{% endif %}


@ -1,20 +1,6 @@
--- ---
- name: Update DNS for bootstrapping with non-Nomad host
hosts: nomad_instances
become: true
gather_facts: false
vars:
non_nomad_dns: 192.168.2.170
tasks:
- name: Add non-nomad bootstrap DNS
lineinfile:
dest: /etc/resolv.conf
create: true
line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker - name: Install Docker
hosts: nomad_clients hosts: nomad_instances
become: true become: true
vars: vars:
docker_architecture_map: docker_architecture_map:
@ -44,25 +30,8 @@
# state: present # state: present
- name: Create NFS mounts - name: Create NFS mounts
hosts: nomad_clients hosts: nomad_instances
become: true become: true
vars:
shared_nfs_mounts:
- src: 192.168.2.10:/Media
path: /srv/volumes/media-read
opts: proto=tcp,port=2049,ro
- src: 192.168.2.10:/Media
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Container
path: /srv/volumes/nas-container
opts: proto=tcp,port=2049,rw
tasks: tasks:
- name: Install nfs - name: Install nfs
@ -70,16 +39,53 @@
name: nfs-common name: nfs-common
state: present state: present
- name: Mount NFS volumes - name: Create Motioneye NFS mount
ansible.posix.mount: ansible.posix.mount:
src: "{{ item.src }}" src: 192.168.2.10:/Recordings/Motioneye
path: "{{ item.path }}" path: /srv/volumes/motioneye-recordings
opts: "{{ item.opts }}" opts: proto=tcp,port=2049,rw
state: mounted state: mounted
fstype: nfs4 fstype: nfs4
loop: "{{ shared_nfs_mounts + (nfs_mounts | default([])) }}"
- import_playbook: wesher.yml - name: Create Media Library RO NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Multimedia
path: /srv/volumes/media-read
opts: proto=tcp,port=2049,ro
state: mounted
fstype: nfs4
- name: Create Media Library RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Multimedia
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Photo library NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Download RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Download
path: /srv/volumes/download
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Container NAS RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Container
path: /srv/volumes/container
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Build Nomad cluster - name: Build Nomad cluster
hosts: nomad_instances hosts: nomad_instances
@ -88,6 +94,12 @@
vars: vars:
shared_host_volumes: shared_host_volumes:
- name: motioneye-recordings
path: /srv/volumes/motioneye-recordings
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-read - name: media-read
path: /srv/volumes/media-write path: /srv/volumes/media-write
read_only: true read_only: true
@ -97,36 +109,27 @@
group: "root" group: "root"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: media-downloads - name: tv-sonarr
path: /srv/volumes/media-write/Downloads path: "/srv/volumes/media-write/TV Shows"
owner: 1001
group: 100
mode: "0755"
read_only: false read_only: false
- name: sabnzbd-config - name: download
path: /srv/volumes/media-write/Downloads/sabnzbd path: /srv/volumes/download
owner: 1001
group: 100
mode: "0755"
read_only: false
- name: nzbget-data
path: /srv/volumes/container/nzbget/config
read_only: false
- name: gitea-data
path: /srv/volumes/container/gitea
read_only: false read_only: false
- name: photoprism-media - name: photoprism-media
path: /srv/volumes/photos/Photoprism path: /srv/volumes/photos/Photoprism
read_only: false read_only: false
- name: photoprism-storage
path: /srv/volumes/nas-container/photoprism
read_only: false
- name: nzbget-config
path: /srv/volumes/nas-container/nzbget
read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config
path: /srv/volumes/nas-container/lidarr
read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: all-volumes - name: all-volumes
path: /srv/volumes path: /srv/volumes
owner: "root" owner: "root"
@ -137,10 +140,9 @@
roles: roles:
- name: ansible-nomad - name: ansible-nomad
vars: vars:
nomad_version: "1.8.0-1" nomad_version: "1.4.2-1"
nomad_install_upgrade: true nomad_install_upgrade: true
nomad_allow_purge_config: true nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to # Where nomad gets installed to
nomad_bin_dir: /usr/bin nomad_bin_dir: /usr/bin
@ -173,8 +175,6 @@
docker: docker:
config: config:
allow_privileged: true allow_privileged: true
gc:
image_delay: "24h"
volumes: volumes:
enabled: true enabled: true
selinuxlabel: "z" selinuxlabel: "z"
@ -194,17 +194,16 @@
nomad_bind_address: 0.0.0.0 nomad_bind_address: 0.0.0.0
# Default interface for binding tasks # Default interface for binding tasks
# This is now set at the inventory level # nomad_network_interface: lo
# nomad_network_interface: eth0
# Create networks for binding task ports # Create networks for binding task ports
nomad_host_networks: nomad_host_networks:
- name: nomad-bridge
interface: nomad
reserved_ports: "22"
- name: loopback - name: loopback
interface: lo interface: lo
reserved_ports: "22" reserved_ports: "22"
- name: wesher
interface: wgoverlay
reserved_ports: "22"
# Enable ACLs # Enable ACLs
nomad_acl_enabled: true nomad_acl_enabled: true
@ -214,7 +213,7 @@
enabled: true enabled: true
- name: Bootstrap Nomad ACLs and scheduler - name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_servers hosts: nomad_instances
tasks: tasks:
- name: Start Nomad - name: Start Nomad
@ -228,9 +227,9 @@
method: GET method: GET
status_code: 200 status_code: 200
register: nomad_check_result register: nomad_check_result
retries: 8 retries: 6
until: nomad_check_result is succeeded until: nomad_check_result is succeeded
delay: 15 delay: 10
changed_when: false changed_when: false
run_once: true run_once: true
@ -244,7 +243,6 @@
run_once: true run_once: true
ignore_errors: true ignore_errors: true
register: bootstrap_result register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result - name: Save bootstrap result
copy: copy:
@ -276,17 +274,14 @@
- list - list
environment: environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}" NOMAD_TOKEN: "{{ read_secretid.stdout }}"
register: policies
run_once: true run_once: true
changed_when: false register: policies
- name: Copy policy - name: Copy policy
copy: copy:
src: ../acls/nomad-anon-policy.hcl src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
register: anon_policy
- name: Create anon-policy - name: Create anon-policy
command: command:
@ -304,18 +299,6 @@
delegate_to: "{{ play_hosts[0] }}" delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption - name: Enable service scheduler preemption
command: command:
argv: argv:
@ -323,24 +306,12 @@
- operator - operator
- scheduler - scheduler
- set-config - set-config
- -preempt-system-scheduler=true
- -preempt-service-scheduler=true - -preempt-service-scheduler=true
environment: environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}" NOMAD_TOKEN: "{{ read_secretid.stdout }}"
delegate_to: "{{ play_hosts[0] }}"
run_once: true run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault # - name: Set up Nomad backend and roles in Vault
# community.general.terraform: # community.general.terraform:


@ -8,3 +8,23 @@
name: nomad name: nomad
state: stopped state: stopped
become: true become: true
- name: Stop Vault
hosts: nomad_instances
tasks:
- name: Stop Vault
systemd:
name: vault
state: stopped
become: true
- name: Stop Consul
hosts: consul_instances
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true


@ -0,0 +1,27 @@
---
- name: Unseal Vault
hosts: vault_instances
tasks:
- name: Get Vault status
uri:
url: http://127.0.0.1:8200/v1/sys/health
method: GET
status_code: 200, 429, 472, 473, 501, 503
body_format: json
return_content: true
register: vault_status
- name: Unseal Vault
no_log: true
command:
argv:
- "vault"
- "operator"
- "unseal"
- "-address=http://127.0.0.1:8200/"
- "{{ item }}"
loop: "{{ unseal_keys_hex }}"
when:
- unseal_keys_hex is defined
- vault_status.json["sealed"]
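Per the Makefile's `unseal-vault` target, this playbook is invoked with the keys file (presumably the output of `vault operator init -format=json`) passed as extra vars:

    env VIRTUAL_ENV=venv venv/bin/ansible-playbook -K -vv \
        -e "@vault-keys.json" \
        ./ansible_playbooks/unseal-vault.yml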


@ -0,0 +1,4 @@
consul_values:
"blocky/whitelists/ads": |
- |
somedomain.com


@ -1,159 +0,0 @@
nomad/jobs:
base_hostname: VALUE
db_user_ro: VALUE
ldap_base_dn: VALUE
notify_email: VALUE
nomad/jobs/authelia:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
email_sender: VALUE
jwt_secret: VALUE
oidc_clients: VALUE
oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE
oidc_issuer_private_key: VALUE
session_secret: VALUE
storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/backup:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/blocky:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
mappings: VALUE
whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/ddclient:
domain: VALUE
domain_ddclient: VALUE
zone: VALUE
nomad/jobs/diun:
slack_hook_url: VALUE
nomad/jobs/git:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
secret_key: VALUE
smtp_sender: VALUE
nomad/jobs/grafana:
admin_pw: VALUE
alert_email_addresses: VALUE
db_name: VALUE
db_pass: VALUE
db_pass_ro: VALUE
db_user: VALUE
db_user_ro: VALUE
minio_access_key: VALUE
minio_secret_key: VALUE
oidc_secret: VALUE
slack_bot_token: VALUE
slack_bot_url: VALUE
slack_hook_url: VALUE
smtp_password: VALUE
smtp_user: VALUE
nomad/jobs/immich:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lego:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/lidarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lldap:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
jwt_secret: VALUE
key_seed: VALUE
smtp_from: VALUE
smtp_reply_to: VALUE
nomad/jobs/minitor:
mailgun_api_key: VALUE
nomad/jobs/mysql-server:
mysql_root_password: VALUE
nomad/jobs/photoprism:
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/postgres-server:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/radarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia:
allowed_psks: VALUE
nomad/jobs/redis-blocky:
allowed_psks: VALUE
nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE
nomad/jobs/sonarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/traefik:
external: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/oidc:
secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql:
mysql_root_password: VALUE
secrets/postgres:
superuser: VALUE
superuser_pass: VALUE
secrets/smtp:
password: VALUE
port: VALUE
server: VALUE
tls: VALUE
user: VALUE
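For reference, a single entry from this sample could also be loaded by hand with the Nomad 1.4+ variables CLI; this is a hypothetical sketch with placeholder values, since the repo drives it through `scripts/nomad_vars.py` instead.

    nomad var put nomad/jobs/authelia db_name=authelia db_user=authelia db_pass=REPLACE_ME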


@ -0,0 +1,23 @@
# Example map of vault values to bootstrap
# These should be encrypted with Ansible Vault if actually stored here
hashi_vault_values:
nextcloud:
db_name: nextcloud
# Eventually replace this with dynamic secrets from Hashicorp Vault
db_user: nextcloud
db_pass: nextcloud
mysql:
root_password: supersecretpassword
slack:
bot_url: ...
bot_token: ...
hook_url: ...
grafana:
alert_email_addresses: email@example.com
backups:
backup_passphrase: tellnoone
vault_userpass:
- name: admin
password: foo
policies: default


@ -1,2 +0,0 @@
---
wesher_key: "{{ vault_wesher_key }}"


@ -1,53 +0,0 @@
- name: Create overlay network
hosts: nomad_instances
become: true
vars_files:
- vars/wesher_vars.yml
- vars/vault_wesher_vars.yml
vars:
wesher_key: "{{ wesher_key }}"
wesher_version: v0.2.6
wesher_arch_map:
x86_64: amd64
armv7l: arm
aarch64: arm64
wesher_arch: "{{ wesher_arch_map[ansible_architecture] }}"
# wesher_sha256_map:
# x86_64: 8c551ca211d7809246444765b5552a8d1742420c64eff5677d1e27a34c72aeef
# armv7l: 97f5bbf2b00b8b11a4ca224540bf9c1affdb15432c3b6ad8da4c1a7b6175eb5d
# aarch64: 507c6397d67ea90bddb3e1c06ec9d8e38d4342ed6f0f6b47855fecc9f1d6fae0
# wesher_checksum: sha256:{{ wesher_sha256_map[ansible_architecture] }}
wesher_checksum: sha256:https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher.sha256sums
tasks:
- name: Download wesher
get_url:
url: https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher-{{ wesher_arch }}
dest: /usr/local/sbin/wesher
checksum: "{{ wesher_checksum }}"
owner: root
mode: "0755"
- name: Install systemd unit
get_url:
url: https://github.com/costela/wesher/raw/{{ wesher_version }}/dist/wesher.service
dest: /etc/systemd/system/wesher.service
- name: Write wesher config
lineinfile:
path: /etc/default/wesher
create: true
regexp: "^{{ item.split('=')[0] }}"
line: "{{ item }}"
owner: root
mode: "0600"
loop:
- WESHER_CLUSTER_KEY={{ wesher_key }}
- WESHER_JOIN={% for host in ansible_play_hosts %}{{ hostvars[host].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %}
- name: Start wesher
systemd:
name: wesher.service
daemon_reload: true
state: started
enabled: true


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}


@ -1,242 +0,0 @@
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
datacenters = ["dc1"]
priority = 90
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
%{ endif ~}
%{ if batch_node != null ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
}
%{ endif ~}
group "backup" {
network {
mode = "bridge"
port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = false
source = "all-volumes"
}
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service {
name = "backup"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape"
]
}
task "backup" {
driver = "docker"
shutdown_delay = "5m"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = false
}
config {
image = "iamthefij/resticscheduler:0.4.0"
ports = ["metrics"]
args = [
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
"--snapshot",
"$${NOMAD_META_snapshot}",
"--push-gateway",
"http://pushgateway.nomad:9091",
%{ endif ~}
"$${NOMAD_TASK_DIR}/node-jobs.hcl",
]
}
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/resticscheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
env = {
RCLONE_CHECKERS = "2"
RCLONE_TRANSFERS = "2"
RCLONE_FTP_CONCURRENCY = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
}
template {
data = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
{{ with nomadVar "secrets/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}}
{{ with nomadVar "secrets/postgres" }}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}}
EOF
destination = "secrets/db.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~}
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/${job_file}")}
{{ end -}}
{{ end -}}
%{ endfor ~}
# Dummy job to keep task healthy on node without any stateful services
job "Dummy" {
schedule = "@daily"
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
EOF
destination = "local/node-jobs.hcl"
}
resources {
cpu = 50
memory = 500
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
}
}
}
}
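A sketch of dispatching one of the per-node parameterized instances of this job (the `backup-oneoff-*` names come from the Terraform below); the meta keys match the `parameterized` block above, and the job name here is only an example.

    nomad job dispatch -meta job_name=lldap -meta task=backup -meta snapshot=latest backup-oneoff-n1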


@ -1,136 +0,0 @@
resource "nomad_job" "backup" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
use_wesher = var.use_wesher
})
}
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-n2", "backup-oneoff-pi4"])
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}


@ -1,54 +0,0 @@
job "authelia" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local authelia dir" {
pre_script {
on_backup = "mkdir -p /local/authelia"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "authelia"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/authelia/dump.sql"
}
}
backup {
paths = ["/local/authelia"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,57 +0,0 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,64 +0,0 @@
job "lidarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/lidarr"]
backup_opts {
Exclude = [
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,53 +0,0 @@
job "lldap" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local backup dir" {
pre_script {
on_backup = "mkdir -p /local/lldap"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
database = "lldap"
no_tablespaces = true
dump_to = "/local/lldap/dump.sql"
}
}
backup {
paths = ["/local/lldap"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,41 +0,0 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = [
# Configuration
"/data/nas-container/nzbget",
# Queued nzb files
"/data/media-write/Downloads/nzb",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,60 +0,0 @@
job "photoprism" {
schedule = "10 * * * *"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local photoprism dir" {
pre_script {
on_backup = "mkdir -p /local/photoprism"
}
}
task "Dump database" {
mysql "Dump database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "photoprism"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/photoprism/dump.sql"
}
}
backup {
paths = [
"/local/photoprism",
"/data/nas-container/photoprism",
]
backup_opts {
Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,64 +0,0 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,36 +0,0 @@
job "sabnzbd" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = ["/data/media-write/Downloads/sabnzbd"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,67 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

core.tf Normal file

@ -0,0 +1,12 @@
module "databases" {
source = "./databases"
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}
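
The new core.tf references var.base_hostname, whose declaration is not shown in this diff; presumably the root module declares it the same way the per-module variables elsewhere in the repo do, along the lines of:

variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}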


@ -1,40 +1,59 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/consul" {
version = "2.1.1" version = "2.16.2"
hashes = [ hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=", "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb", "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1", "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9", "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa", "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d", "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f", "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39", "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082", "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196", "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f", "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7", "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
] ]
} }
provider "registry.terraform.io/hashicorp/random" { provider "registry.terraform.io/hashicorp/external" {
version = "3.6.0" version = "2.2.2"
hashes = [ hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", "zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", "zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", "zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", "zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", "zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", "zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", "zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", "zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", "zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
] ]
} }


@ -1,185 +0,0 @@
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:4.37"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9999
service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
use_smtp = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = "nomad"
oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
bound_audiences = ["nomad"]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}
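
The bind_name values above point at ACL roles ("admin" and "deploy") defined outside this file, per the inline comments referencing an acls module. A rough sketch of such a role, with the attached policy name being an assumption:

resource "nomad_acl_role" "deploy" {
  name        = "deploy"
  description = "Allow submitting and deploying jobs"

  policy {
    name = "deploy"
  }
}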


@ -1,252 +0,0 @@
theme: auto
# jwt_secret: <file>
{{ with nomadVar "nomad/jobs" }}
default_redirection_url: https://authelia.{{ .base_hostname }}/
{{ end }}
## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
## disabled. This setting must be a method that is enabled.
## Options are totp, webauthn, mobile_push.
default_2fa_method: ""
server:
host: 0.0.0.0
port: {{ env "NOMAD_PORT_main" }}
disable_healthcheck: false
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
format: json
telemetry:
metrics:
enabled: false
# address: '0.0.0.0:{{ env "NOMAD_PORT_metrics" }}'
totp:
disable: false
issuer: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
digits: 6
## The TOTP algorithm to use.
## It is CRITICAL you read the documentation before changing this option:
## https://www.authelia.com/c/totp#algorithm
algorithm: sha1
webauthn:
disable: false
timeout: 60s
display_name: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
user_verification: preferred
duo_api:
disable: true
# hostname:
# integration_key:
# secret_key:
# enable_self_enrollment: false
authentication_backend:
disable_reset_password: false
## Password Reset Options.
password_reset:
## External reset password url that redirects the user to an external reset portal. This disables the internal reset
## functionality.
# TODO: not sure if this is needed, probably not?
custom_url: ""
refresh_interval: 5m
ldap:
implementation: custom
# stunnel url
url: ldap://127.0.0.1:389
timeout: 5s
# TODO: Maybe use stunnel for this
start_tls: false
base_dn: {{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
additional_users_dn: ou=people
additional_groups_dn: ou=groups
username_attribute: uid
group_name_attribute: cn
mail_attribute: mail
display_name_attribute: displayName
# To allow sign in both with username and email, one can use a filter like
# (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
users_filter: "(&({username_attribute}={input})(objectClass=person))"
# Only supported filter by lldap right now
groups_filter: (member={dn})
## The username and password of the admin user.
{{ with nomadVar "secrets/ldap" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }}
# password set using secrets file
# password: <secret>
password_policy:
standard:
enabled: false
min_length: 8
max_length: 0
require_uppercase: true
require_lowercase: true
require_number: true
require_special: true
zxcvbn:
enabled: false
min_score: 3
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matches any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 192.168.1.0/24
- 192.168.2.0/24
- 192.168.10.0/24
- name: VPN
networks: 192.168.5.0/24
rules:
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks:
- internal
policy: one_factor
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
- domain:
# TODO: Drive these from Nomad variables
- 'secure.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
session:
## The name of the session cookie.
name: authelia_session
domain: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Stored in a secrets file
# secret: <in file>
expiration: 1h
inactivity: 5m
remember_me_duration: 1M
redis:
host: 127.0.0.1
port: 6379
# username: authelia
# password: authelia
# database_index: 0
maximum_active_connections: 8
minimum_idle_connections: 0
regulation:
max_retries: 3
find_time: 2m
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
# encryption_key: <in file>
##
## MySQL / MariaDB (Storage Provider)
##
mysql:
host: 127.0.0.1
port: 3306
{{ with nomadVar "nomad/jobs/authelia" }}
database: {{ .db_name }}
username: {{ .db_user }}
# password: <in_file>
{{- end }}
timeout: 5s
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a Webauthn registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: false
{{ with nomadVar "secrets/smtp" }}
smtp:
host: {{ .server }}
port: {{ .port }}
username: {{ .user }}
# password: <in file>
{{- end }}
{{ with nomadVar "nomad/jobs/authelia" }}
sender: "{{ .email_sender }}"
## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
subject: "[Authelia] {title}"
## This address is used during the startup check to verify the email configuration is correct.
## It's not important what it is except if your email server only allows local delivery.
startup_check_address: test@iamthefij.com
{{- end }}
identity_providers:
oidc:
# hmac_secret: <file>
# issuer_private_key: <file>
clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }}
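
The clients list above is rendered verbatim from the oidc_clients field of the nomad/jobs/authelia variable, so that field has to contain a YAML list serialized as a string. A purely hypothetical example of its shape (client id, URL, and the placeholder secret are all made up):

resource "nomad_variable" "authelia" {
  path = "nomad/jobs/authelia"
  items = {
    # ...other fields referenced above (db_name, db_user, jwt_secret, ...)
    oidc_clients = <<-EOF
      - id: grafana
        description: Grafana
        secret: fake-hashed-client-secret
        authorization_policy: one_factor
        redirect_uris:
          - https://grafana.example.com/login/generic_oauth
        scopes:
          - openid
          - groups
          - email
    EOF
  }
}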


@ -2,39 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0" version = "1.4.16"
hashes = [ hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=", "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72", "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199", "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a", "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc", "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf", "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251", "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc", "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599", "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c", "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543", "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e", "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
] ]
} }


@ -5,25 +5,16 @@ variable "config_data" {
job "blocky" { job "blocky" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 100 priority = 100
constraint {
distinct_hosts = true
}
update { update {
max_parallel = 1 max_parallel = 1
# TODO: maybe switch to service job from system so we can use canary and autorollback # TODO: maybe switch to service job from system so we can use canary and autorollback
auto_revert = true # auto_revert = true
min_healthy_time = "60s"
healthy_deadline = "5m"
} }
group "blocky" { group "blocky" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 3
network { network {
mode = "bridge" mode = "bridge"
@ -33,17 +24,14 @@ job "blocky" {
} }
port "api" { port "api" {
%{~ if use_wesher ~} # TODO: This may be broken. It seems we're exposing the loopback address which can't be reached
host_network = "wesher" # host_network = "loopback"
%{~ endif ~}
to = "4000" to = "4000"
} }
dns { dns {
# Set explicit DNS servers because tasks, by default, use this task # Set explicit DNS servers because tasks, by default, use this task
servers = [ servers = ["1.1.1.1", "1.0.0.1"]
"192.168.2.1",
]
} }
} }
@ -58,8 +46,11 @@ job "blocky" {
provider = "nomad" provider = "nomad"
port = "api" port = "api"
meta {
metrics_addr = "${NOMAD_ADDR_api}"
}
tags = [ tags = [
"prometheus.scrape",
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.blocky-api.entryPoints=websecure", "traefik.http.routers.blocky-api.entryPoints=websecure",
] ]
@ -71,11 +62,6 @@ job "blocky" {
path = "/" path = "/"
interval = "10s" interval = "10s"
timeout = "3s" timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
} }
} }
@ -83,106 +69,30 @@ job "blocky" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/0xerr0r/blocky:v0.22" image = "ghcr.io/0xerr0r/blocky"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"] ports = ["dns", "api"]
mount {
type = "bind"
target = "/app/config.yml"
source = "app/config.yml"
}
} }
resources { resources {
cpu = 50 cpu = 50
memory = 75 memory = 50
memory_max = 150 memory_max = 100
} }
template { template {
data = var.config_data data = var.config_data
destination = "$${NOMAD_TASK_DIR}/config.yml" destination = "app/config.yml"
splay = "1m" splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ range nomadServices }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") .Name -}}
{{ .Address }} {{ .Name }}.nomad
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
} }
} }
task "stunnel" { task "blocky-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 20
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker" driver = "docker"
lifecycle { lifecycle {
@ -193,21 +103,21 @@ EOF
config { config {
image = "mariadb:10" image = "mariadb:10"
args = [ args = [
"/usr/bin/timeout",
"2m",
"/bin/bash", "/bin/bash",
"-c", "-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", "/usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql",
] ]
} }
template { template {
data = <<EOF data = <<EOF
[client] [client]
host=127.0.0.1 {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
port=3306 host={{ .Address }}
port={{ .Port }}
{{ end -}}
user=root user=root
{{ with nomadVar "secrets/mysql" }} {{ with nomadVar "nomad/jobs" }}
password={{ .mysql_root_password }} password={{ .mysql_root_password }}
{{ end }} {{ end }}
EOF EOF
@ -216,20 +126,21 @@ password={{ .mysql_root_password }}
template { template {
data = <<EOF data = <<EOF
{{ with nomadVar "nomad/jobs/blocky" }}{{ if .db_name -}} {{ with nomadVar "nomad/jobs/blocky" -}}
{{ if .db_name -}}
{{ $db_name := .db_name }} {{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`; CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}'; CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%'; GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
{{ with nomadVar "nomad/jobs" -}}
{{ with nomadService "grafana" }}{{ with nomadVar "nomad/jobs" -}} -- Add grafana read_only user
-- Grant grafana read_only user access to db CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%'; GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
{{ end }}{{ end -}} {{ end -}}
{{ else -}} {{ else -}}
SELECT 'NOOP'; SELECT 'NOOP';
{{ end -}}{{ end -}} {{ end -}}
{{ end -}}
EOF EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql" destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
} }


@ -1,68 +1,25 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals { locals {
config_data = file("${path.module}/config.yml") config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = var.base_hostname,
}
)
} }
resource "nomad_job" "blocky" { resource "nomad_job" "blocky" {
hcl2 { hcl2 {
enabled = true
vars = { vars = {
"config_data" = local.config_data, "config_data" = local.config_data,
} }
} }
jobspec = templatefile("${path.module}/blocky.nomad", { jobspec = file("${path.module}/blocky.nomad")
use_wesher = var.use_wesher,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
} }


@ -1,53 +1,32 @@
ports:
dns: 53
http: 4000
bootstrapDns: bootstrapDns:
- upstream: 1.1.1.1 ips:
- upstream: 1.0.0.1 - 1.1.1.1
- upstream: 9.9.9.9 - 1.0.0.1
- upstream: 149.112.112.112
upstream:
upstreams: default:
groups: - 1.1.1.1
default: - 1.0.0.1
- https://dns.quad9.net/dns-query quad9:
- tcp-tls:dns.quad9.net - 9.9.9.9
- https://one.one.one.one/dns-query - 149.112.112.112
- tcp-tls:one.one.one.one - 2620:fe::fe
cloudflare: - 2620:fe::9
- 1.1.1.1 - https://dns.quad9.net/dns-query
- 1.0.0.1 - tcp-tls:dns.quad9.net
- 2606:4700:4700::1111 quad9-unsecured:
- 2606:4700:4700::1001 - 9.9.9.10
- https://one.one.one.one/dns-query - 149.112.112.10
- tcp-tls:one.one.one.one - 2620:fe::10
quad9: - 2620:fe::fe:10
- 9.9.9.9 - https://dns10.quad9.net/dns-query
- 149.112.112.112 - tcp-tls:dns10.quad9.net
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-secured:
- 9.9.9.11
- 149.112.112.11
- 2620:fe::11
- 2620:fe::fe:11
- https://dns11.quad9.net/dns-query
- tcp-tls:dns11.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
conditional: conditional:
fallbackUpstream: false fallbackUpstream: false
mapping: mapping:
# TODO: Run a simple DNS server that this can forward to, whose hosts are set by Nomad services
# consul: {{ env "attr.unique.network.ip-address" }}:8600
home.arpa: 192.168.2.1 home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1 in-addr.arpa: 192.168.2.1
iot: 192.168.2.1 iot: 192.168.2.1
@ -55,13 +34,6 @@ conditional:
thefij: 192.168.2.1 thefij: 192.168.2.1
.: 192.168.2.1 .: 192.168.2.1
hostsFile:
sources:
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s
loading:
refreshPeriod: 30s
clientLookup: clientLookup:
upstream: 192.168.2.1 upstream: 192.168.2.1
@ -72,50 +44,27 @@ blocking:
- http://sysctl.org/cameleon/hosts - http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt - https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt - https://hosts-file.net/ad_servers.txt
smarttv: smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt - https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
# - https://perflyst.github.io/PiHoleBlocklist/regex.list - https://perflyst.github.io/PiHoleBlocklist/regex.list
wemo: malware:
- | - https://mirror1.malwaredomains.com/files/justdomains
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com
antisocial:
- |
facebook.com
instagram.com
reddit.com
twitter.com
youtube.com
custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/block
whiteLists: whiteLists:
# Move to Gitea when deployed internally
ads: ads:
{{ with nomadVar "nomad/jobs/blocky" -}} {{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }} {{ .whitelists_ads | indent 6 }}
{{- end }} {{- end }}
custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/allow
clientGroupsBlock: clientGroupsBlock:
default: default:
- ads - ads
- custom - malware
- smarttv - smarttv
- wemo
customDNS: customDNS:
customTTL: 1h customTTL: 1h
mapping: mapping:
{{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings.Value | indent 4 }}{{ end }}
# Catch all at top domain to traefik
{{ with nomadService "traefik" -}} {{ with nomadService "traefik" -}}
{{- $last := len . | subtract 1 -}} {{- $last := len . | subtract 1 -}}
{{- $services := . -}} {{- $services := . -}}
@ -123,26 +72,32 @@ customDNS:
{{- with index $services $i }}{{ .Address }},{{ end -}} {{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}} {{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}} {{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }} {{- end }}
# Other mappings
{{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings | indent 4 }}{{ end }}
prometheus: prometheus:
enable: true enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}} {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
redis: redis:
address: 127.0.0.1:6379 address: {{ .Address }}:{{ .Port }}
# password: "" # password: ""
# database: 0 # database: 0
connectionAttempts: 10 connectionAttempts: 10
connectionCooldown: 3s connectionCooldown: 3s
{{ end -}} {{ end -}}
{{ $mysql_addr := "" }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} {{ with nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}{{ range . -}}
{{ $mysql_addr = print .Address ":" .Port }}
{{- end }}{{- end }}
{{ with nomadVar "nomad/jobs/blocky" -}} {{ with nomadVar "nomad/jobs/blocky" -}}
queryLog: queryLog:
type: mysql type: mysql
target: {{ .db_user }}:{{ .db_pass }}@tcp(127.0.0.1:3306)/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local target: {{ .db_user }}:{{ .db_pass }}@tcp({{ $mysql_addr }})/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
logRetentionDays: 14 logRetentionDays: 14
{{ end -}} {{ end -}}
{{ end -}}
port: 53
httpPort: 4000

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -6,9 +6,8 @@ job "ddclient" {
task "ddclient" { task "ddclient" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.io/linuxserver/ddclient:v3.10.0-ls104" image = "linuxserver/ddclient:3.9.1"
mount { mount {
type = "bind" type = "bind"
@ -23,13 +22,14 @@ job "ddclient" {
daemon=900 daemon=900
ssl=yes ssl=yes
use=web use=web
web=api.myip.com
protocol=cloudflare, protocol=cloudflare,
zone={{ .zone }}, zone={{ .zone }},
ttl=1, ttl=1,
login=token, login={{ .cloudflare_api_user }},
password={{ .domain_ddclient }} password={{ .cloudflare_api_key }}
# login=token,
# password={{ .cloudflare_api_token_dns_edit_all }}
{{ .domain }} {{ .domain }}
{{- end }} {{- end }}


@ -1,293 +0,0 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
]
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
{{ if .db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.* to '{{ .db_user }}'@'%';
-- Create Read Only user
CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:10.0.10"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"]
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
}
template {
data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
{{ if .db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }}
{{ end -}}
SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
resources {
cpu = 100
memory = 200
}
}
task "grafana-reprovisioner" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/startup.sh"]
}
resources {
cpu = 100
memory = 100
}
env = {
LOG_FILE = "/var/log/grafana_reloader.log"
}
template {
data = <<EOF
#! /bin/sh
apk add curl
touch "$LOG_FILE"
exec tail -f "$LOG_FILE"
EOF
perms = "777"
destination = "$${NOMAD_TASK_DIR}/startup.sh"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
{{ end -}}
EOF
env = true
destination = "$${NOMAD_SECRETS_DIR}/conf.env"
}
template {
data = <<EOF
#! /bin/sh
exec > "$LOG_FILE"
exec 2>&1
GRAFANA_URL=http://127.0.0.1:3000
echo "Reload dashboards"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/dashboards/reload
echo "Reload datasources"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/datasources/reload
echo "Reload plugins"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/plugins/reload
echo "Reload notifications"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/notifications/reload
echo "Reload access-control"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/access-control/reload
echo "Reload alerting"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/alerting/reload
EOF
change_mode = "noop"
perms = "777"
destination = "$${NOMAD_TASK_DIR}/reload_config.sh"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
destination = "$${NOMAD_ALLOC_DIR}/config/${config_file}"
perms = 777
# Set owner to grafana uid
# uid = 472
# Change template delimiters for dashboard files that use JSON and contain double curly braces and square brackets
%{ if endswith(config_file, ".json") ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
change_mode = "script"
change_script {
command = "/local/reload_config.sh"
}
}
%{ endfor }
}
task "grafana-image-renderer" {
driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]
}
env = {
"RENDERING_MODE" = "clustered"
"RENDERING_CLUSTERING_MODE" = "browser"
"RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5
"RENDERING_CLUSTERING_TIMEOUT" = 30
}
}
}
}


@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox


@ -1,16 +0,0 @@
---
apiVersion: 1
datasources:
- name: Blocky logs
url: 127.0.0.1:3306
# TODO: Looking for an ACL-friendly way to expose this since it's a variable in the blocky setup
database: blocky
type: mysql
isDefault: false
version: 1
{{ with nomadVar "nomad/jobs/grafana" -}}
user: {{ .db_user_ro }}
secureJsonData:
password: {{ .db_pass_ro }}
{{- end }}


@ -1,97 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
# image = "alpine:3"
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f /.lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}


@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}
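
The wildcard job_id is needed because lego is a periodic batch job: Nomad dispatches child jobs with IDs of the form lego/periodic-<timestamp>, and it is those children that actually read and write secrets/certs.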

core/lldap.nomad Normal file

@ -0,0 +1,104 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
group "lldap" {
network {
mode = "bridge"
port "web" {
to = 17170
}
port "ldap" {
to = 3890
}
}
volume "lldap-data" {
type = "host"
read_only = false
source = "lldap-data"
}
service {
name = "lldap"
provider = "nomad"
port = "ldap"
}
service {
name = "ldap-admin"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
volume_mount {
volume = "lldap-data"
destination = "/data"
read_only = false
}
config {
image = "nitnelave/lldap:v0.4"
ports = ["ldap", "web"]
args = ["run", "--config-file", "/lldap_config.toml"]
mount {
type = "bind"
source = "secrets/lldap_config.toml"
target = "/lldap_config.toml"
}
}
env = {
"LLDAP_LDAP_PORT" = "${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "${NOMAD_PORT_web}"
}
template {
data = <<EOH
database_url = "sqlite:///data/users.db?mode=rwc"
key_file = "/data/private_key"
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .base_dn }}{{ end }}"
{{ with nomadVar "nomad/jobs/lldap" }}
jwt_secret = "{{ .jwt_secret }}"
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
ldap_user_pass = "{{ .admin_password }}"
{{ end -}}
{{ with nomadVar "nomad/jobs" -}}
[smtp_options]
enable_password_reset = true
server = "{{ .smtp_server }}"
port = {{ .smtp_port }}
tls_required = {{ .smtp_tls }}
user = "{{ .smtp_user }}"
password = "{{ .smtp_password }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
{{ end -}}
EOH
destination = "secrets/lldap_config.toml"
change_mode = "restart"
}
resources {
cpu = 10
memory = 200
memory_max = 200
}
}
}
}
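
The "lldap-data" host volume mounted above has to exist in each Nomad client's agent configuration; a minimal sketch of that stanza (the path on disk is an assumption):

client {
  host_volume "lldap-data" {
    path      = "/srv/volumes/lldap-data"
    read_only = false
  }
}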


@ -3,27 +3,31 @@ auth_enabled: false
 server:
   http_listen_port: 3100
-common:
-  ring:
-    instance_addr: 127.0.0.1
-    kvstore:
-      store: inmemory
-  replication_factor: 1
-  path_prefix: /tmp/loki
+ingester:
+  lifecycler:
+    address: 127.0.0.1
+    ring:
+      kvstore:
+        store: inmemory
+      replication_factor: 1
+    final_sleep: 0s
+  chunk_idle_period: 5m
+  chunk_retain_period: 30s
+  max_transfer_retries: 0
 schema_config:
   configs:
-    - from: 2020-05-15
-      store: boltdb-shipper
+    - from: 2018-04-15
+      store: boltdb
       object_store: filesystem
       schema: v11
       index:
         prefix: index_
-        period: 24h
+        period: 168h
 storage_config:
-  boltdb_shipper:
-    active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index
+  boltdb:
+    directory: {{ env "NOMAD_TASK_DIR" }}/index
   filesystem:
     directory: {{ env "NOMAD_TASK_DIR" }}/chunks
@ -34,8 +38,8 @@ limits_config:
   reject_old_samples_max_age: 168h
 chunk_store_config:
-  max_look_back_period: 168h
+  max_look_back_period: 0s
 table_manager:
-  retention_deletes_enabled: true
-  retention_period: 168h
+  retention_deletes_enabled: false
+  retention_period: 0s


@ -1,24 +0,0 @@
module "loki" {
source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}
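
The service_check argument above presumably renders to a plain Nomad HTTP check inside the shared service module, roughly like the one the blocky job defines inline; the interval, timeout, and port label here are assumptions since that module is not part of this diff:

check {
  name     = "loki-ready"
  type     = "http"
  path     = "/ready"
  port     = "main"
  interval = "10s"
  timeout  = "3s"
}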


@ -1,27 +1,69 @@
module "blocky" { module "blocky" {
source = "./blocky" source = "./blocky"
use_wesher = var.use_wesher base_hostname = var.base_hostname
# Not in this module # Not in this module
# depends_on = [module.databases] # depends_on = [module.databases]
} }
module "traefik" { module "traefik" {
source = "./traefik" source = "./traefik"
base_hostname = var.base_hostname
} }
resource "nomad_job" "nomad-client-stalker" { module "nomad_login" {
# Stalker used to allow using Nomad service registry to identify nomad client hosts source = "IamTheFij/levant/nomad"
jobspec = file("${path.module}/nomad-client-stalker.nomad") version = "0.1.0"
template_path = "service.nomad"
variables = {
name = "nomad-login"
image = "iamthefij/nomad-vault-login"
service_port = 5000
ingress = true
ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
env = jsonencode({
VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
})
}
}
# module "metrics" {
# source = "./metrics"
# # Not in this module
# # depends_on = [module.databases]
# }
module "loki" {
source = "IamTheFij/levant/nomad"
version = "0.1.0"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("${path.module}/loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
} }
resource "nomad_job" "syslog-ng" { resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad") jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
} }
resource "nomad_job" "ddclient" { resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad") jobspec = file("${path.module}/ddclient.nomad")
} }
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}


@ -1,95 +0,0 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}


@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"h1:tAb2gwW+oZ8/t2j7lExdqpNrxmaWsHbyA2crFWClPb0=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,53 +1,71 @@
job "exporters" { job "metrics" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "system"
priority = 55
group "promtail" { group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 3
network { network {
mode = "bridge" mode = "bridge"
port "promtail" { port "promtail" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9080 to = 9080
} }
} }
service { service {
name = "promtail" name = "promtail"
provider = "nomad"
port = "promtail" port = "promtail"
meta { meta {
nomad_dc = "$${NOMAD_DC}" metrics_addr = "${NOMAD_ADDR_promtail}"
nomad_node_name = "$${node.unique.name}" nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
} }
tags = [ connect {
"prometheus.scrape", sidecar_service {
] proxy {
local_service_port = 9080
upstreams {
destination_name = "loki"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "promtail"
interval = "10s"
timeout = "10s"
}
} }
task "promtail" { task "promtail" {
driver = "docker" driver = "docker"
meta = {
"diun.sort_tags" = "semver"
"diun.watch_repo" = true
"diun.include_tags" = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
}
config { config {
image = "grafana/promtail:2.9.1" image = "grafana/promtail:2.2.1"
args = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"] args = ["-config.file=/etc/promtail/promtail.yml"]
ports = ["promtail"] ports = ["promtail"]
# Mount config
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
# Bind mount host machine-id and log directories # Bind mount host machine-id and log directories
mount { mount {
@ -87,9 +105,8 @@ server:
http_listen_port: 9080 http_listen_port: 9080
clients: clients:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}} # loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
- url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push - url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
{{- end }}
scrape_configs: scrape_configs:
@ -120,25 +137,13 @@ scrape_configs:
target_label: docker_compose_project target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service'] - source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service target_label: docker_compose_service
- source_labels: ['__journal_com_hashicorp_nomad_alloc_id']
target_label: nomad_alloc_id
- source_labels: ['__journal_com_hashicorp_nomad_job_id']
target_label: nomad_job_id
- source_labels: ['__journal_com_hashicorp_nomad_job_name']
target_label: nomad_job_name
- source_labels: ['__journal_com_hashicorp_nomad_node_name']
target_label: nomad_node_name
- source_labels: ['__journal_com_hashicorp_nomad_group_name']
target_label: nomad_group_name
- source_labels: ['__journal_com_hashicorp_nomad_task_name']
target_label: nomad_task_name
EOF EOF
destination = "$${NOMAD_TASK_DIR}/promtail.yml" destination = "local/promtail.yml"
} }
resources { resources {
cpu = 50 cpu = 50
memory = 100 memory = 50
} }
} }
} }

228
core/metrics/grafana.nomad Normal file

@ -0,0 +1,228 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 3000
}
port "renderer" {
host_network = "loopback"
to = 8081
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 3000
upstreams {
destination_name = "prometheus"
local_bind_port = 9090
}
upstreams {
destination_name = "loki"
local_bind_port = 3100
}
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/"
port = "web"
interval = "10s"
timeout = "10s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
]
}
task "grafana-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/bin/bash",
"-c",
"/usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql",
]
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
[client]
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
user=root
{{ with secret "kv/data/mysql" }}
password={{ .Data.data.root_password }}
{{ end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with secret "kv/data/grafana" -}}
{{ if .Data.data.db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
-- Add grafana read_only user
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user_ro }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass_ro }}';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:9.2.4"
ports = ["web"]
mount {
type = "bind"
target = "/etc/grafana"
source = "local/config"
}
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
{{ with secret "kv/data/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }}
GF_SMTP_USER={{ .Data.data.smtp_user }}
GF_SMTP_PASSWORD={{ .Data.data.smtp_password }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }}
{{ if .Data.data.db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST={{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
GF_DATABASE_NAME={{ .Data.data.db_name }}
GF_DATABASE_USER={{ .Data.data.db_user }}
GF_DATABASE_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ end -}}
{{ with secret "kv/data/slack" -}}
SLACK_BOT_URL={{ .Data.data.bot_url }}
SLACK_BOT_TOKEN={{ .Data.data.bot_token }}
SLACK_HOOK_URL={{ .Data.data.hook_url }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "local/config/${config_file}"
perms = 777
# Set owner to grafana uid
# uid = 472
# Change template delimiter for dashboard files that use json and have double curly braces and square braces
%{ if length(regexall("dashboard", config_file)) > 0 ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
}
%{ endfor }
resources {
cpu = 100
memory = 200
}
}
task "grafana-image-renderer" {
driver = "docker"
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]
}
env = {
"RENDERING_MODE" = "clustered"
"RENDERING_CLUSTERING_MODE" = "browser"
"RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5
"RENDERING_CLUSTERING_TIMEOUT" = 30
}
}
}
}


@ -20,8 +20,8 @@ data = /var/lib/grafana
# Directory where grafana will automatically scan and look for plugins # Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins ;plugins = /var/lib/grafana/plugins
# folder that contains PROVISIONING config files that grafana will apply on startup and while running. # folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = from_env provisioning = /etc/grafana/provisioning
#################################### Server #################################### #################################### Server ####################################
[server] [server]
@ -43,7 +43,7 @@ provisioning = from_env
# The full public facing url you use in browser, used for redirects and emails # The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path) # If you use reverse proxy and sub path specify full url (with sub path)
root_url = https://grafana.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }} root_url = https://grafana.thefij.rocks
# Log web requests # Log web requests
;router_logging = false ;router_logging = false
@ -258,24 +258,15 @@ log_queries =
#################################### Generic OAuth ########################## #################################### Generic OAuth ##########################
[auth.generic_oauth] [auth.generic_oauth]
enabled = true ;enabled = true
name = Authelia ;name = Cloudron
;allow_sign_up = true ;allow_sign_up = true
client_id = grafana ;client_id = some_id
client_secret = from_env ;client_secret = some_secret
scopes = openid profile email groups ;scopes = user:email,read:org
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization ;auth_url = https://foo.bar/login/oauth/authorize
token_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/token ;token_url = https://foo.bar/login/oauth/access_token
api_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/userinfo ;api_url = https://foo.bar/user
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
# Role attribute path is not working
role_attribute_path = contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || contains(groups[*], 'developer') && 'Editor'
allow_assign_grafana_admin = true
skip_org_role_sync = true
use_pkce = true
;team_ids = ;team_ids =
;allowed_organizations = ;allowed_organizations =
@ -290,9 +281,9 @@ use_pkce = true
#################################### Auth Proxy ########################## #################################### Auth Proxy ##########################
[auth.proxy] [auth.proxy]
{{ with nomadService "traefik" -}} {{ with service "traefik" -}}
enabled = false enabled = true
header_name = Remote-User header_name = X-WEBAUTH-USER
header_property = username header_property = username
auto_sign_up = true auto_sign_up = true
{{- $last := len . | subtract 1 -}} {{- $last := len . | subtract 1 -}}
@ -441,7 +432,7 @@ enabled = true
provider = s3 provider = s3
[external_image_storage.s3] [external_image_storage.s3]
endpoint = https://minio.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }} endpoint = https://minio.thefij.rocks
bucket = grafana-images bucket = grafana-images
region = us-east-1 region = us-east-1
path_style_access = true path_style_access = true
@ -451,5 +442,5 @@ path_style_access = true
[rendering] [rendering]
# Since they are inside the same group, we can reference their bound ports # Since they are inside the same group, we can reference their bound ports
server_url = http://127.0.0.1:8081/render server_url = http://localhost:8081/render
callback_url = http://127.0.0.1:3000/ callback_url = http://localhost:3000/


@ -104,7 +104,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -458,7 +458,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_blacklist_cache) / sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_blacklist_cache) / sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -533,7 +533,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -753,7 +753,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_cache_entry_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_cache_entry_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": false, "instant": false,
"interval": "", "interval": "",
@ -1162,7 +1162,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": false, "exemplar": false,
"expr": "sum(time() -blocky_last_list_group_refresh)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(time() -blocky_last_list_group_refresh)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"instant": true, "instant": true,
"interval": "", "interval": "",
@ -1224,7 +1224,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"exemplar": true, "exemplar": true,
"expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})", "expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"format": "table", "format": "table",
"interval": "", "interval": "",
"legendFormat": "", "legendFormat": "",


@ -0,0 +1,783 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.5.5"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "piechart",
"name": "Pie chart v2",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "singlestat",
"name": "Singlestat",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Traefik dashboard prometheus",
"editable": true,
"gnetId": 4475,
"graphTooltip": 0,
"id": null,
"iteration": 1620932097756,
"links": [],
"panels": [
{
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 10,
"title": "$backend stats",
"type": "row"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 1
},
"id": 2,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value",
"percent"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "traefik_service_requests_total{service=\"$service\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"title": "$service return code",
"type": "piechart"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"format": "ms",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 1
},
"id": 4,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"exemplar": true,
"expr": "sum(traefik_service_request_duration_seconds_sum{service=\"$service\"}) / sum(traefik_service_requests_total{service=\"$service\"}) * 1000",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "",
"title": "$service response time",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "avg"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 3,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total{service=\"$service\"}[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "Total requests $service",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Total requests over 5min $service",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 12,
"panels": [],
"title": "Global stats",
"type": "row"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 16
},
"hiddenSeries": false,
"id": 5,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Status code 200 over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 16
},
"hiddenSeries": false,
"id": 6,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ method }} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Others status code over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ service }}",
"refId": "A"
}
],
"title": "Requests by service",
"type": "piechart"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ entrypoint }}",
"refId": "A"
}
],
"title": "Requests by protocol",
"type": "piechart"
}
],
"schemaVersion": 27,
"style": "dark",
"tags": [
"traefik",
"prometheus"
],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "label_values(service)",
"description": null,
"error": null,
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(service)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "",
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "entrypoint",
"options": [],
"query": {
"query": "label_values(entrypoint)",
"refId": "Prometheus-entrypoint-Variable-Query"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Traefik",
"uid": "qPdAviJmz",
"version": 10
}


@ -5,4 +5,4 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
options: options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default path: /etc/grafana/provisioning/dashboards/default


@ -2,11 +2,9 @@
apiVersion: 1 apiVersion: 1
datasources: datasources:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" }}
- name: Loki - name: Loki
url: "http://{{ .Address }}:{{ .Port }}" url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}
type: loki type: loki
access: proxy access: proxy
isDefault: false isDefault: false
version: 1 version: 1
{{ end }}


@ -0,0 +1,17 @@
---
apiVersion: 1
datasources:
{{ with secret "kv/data/blocky" }}
- name: Blocky logs
url: {{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
database: {{ .Data.data.db_name }}
type: mysql
isDefault: false
version: 1
{{ with secret "kv/data/grafana" }}
user: {{ .Data.data.db_user_ro }}
secureJsonData:
password: {{ .Data.data.db_pass_ro }}
{{ end }}
{{ end -}}


@ -2,11 +2,9 @@
apiVersion: 1 apiVersion: 1
datasources: datasources:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "prometheus" }}
- name: Prometheus - name: Prometheus
url: "http://{{ .Address }}:{{ .Port }}" url: http://{{ env "NOMAD_UPSTREAM_ADDR_prometheus" }}
type: prometheus type: prometheus
access: proxy access: proxy
isDefault: true isDefault: true
version: 1 version: 1
{{ end }}


@ -1,5 +1,5 @@
--- ---
{{ with nomadVar "nomad/jobs/grafana" -}} {{ with secret "kv/data/grafana" -}}
notifiers: notifiers:
- name: Personal email - name: Personal email
type: email type: email
@ -7,5 +7,5 @@ notifiers:
org_id: 1 org_id: 1
is_default: false is_default: false
settings: settings:
addresses: "{{ .alert_email_addresses }}" addresses: "{{ .Data.data.alert_email_addresses }}"
{{ end -}} {{ end -}}


@ -1,5 +1,5 @@
--- ---
{{ with nomadVar "nomad/jobs/grafana" -}} {{ with secret "kv/data/slack" -}}
notifiers: notifiers:
- name: Slack Bot - name: Slack Bot
type: slack type: slack
@ -7,11 +7,11 @@ notifiers:
org_id: 1 org_id: 1
is_default: false is_default: false
settings: settings:
url: "{{ .slack_bot_url }}" url: "{{ .Data.data.bot_url }}"
recipient: "#site-notifications" recipient: "#site-notifications"
username: Grafana Alerts username: Grafana Alerts
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
token: "{{ .slack_bot_token }}" token: "{{ .Data.data.bot_token }}"
uploadImage: true uploadImage: true
mentionChannel: channel mentionChannel: channel
- name: Slack Hook - name: Slack Hook
@ -20,7 +20,7 @@ notifiers:
org_id: 1 org_id: 1
is_default: true is_default: true
settings: settings:
url: "{{ .slack_hook_url }}" url: "{{ .Data.data.hook_url }}"
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
uploadImage: true uploadImage: true
mentionChannel: channel mentionChannel: channel

50
core/metrics/metrics.tf Normal file

@ -0,0 +1,50 @@
resource "nomad_job" "exporters" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/exporters.nomad")
}
resource "nomad_job" "prometheus" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/prometheus.nomad")
}
resource "nomad_job" "grafana" {
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
})
}
resource "consul_config_entry" "prometheus_intent" {
name = "prometheus"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
]
})
}
# resource "consul_config_entry" "envoy_prometheus_bind" {
# name = "global"
# kind = "proxy-defaults"
#
# config_json = jsonencode({
# "envoy_prometheus_bind_addr" = "0.0.0.0:9102"
# })
# }


@ -0,0 +1,168 @@
job "prometheus" {
datacenters = ["dc1"]
group "prometheus" {
count = 1
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 9090
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "prometheus"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 9090
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/"
port = "web"
interval = "10s"
timeout = "10s"
}
// TODO: Remove traefik tags
tags = [
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.30.2"
ports = ["web"]
args = [
"--config.file=/etc/prometheus/config/prometheus.yml",
"--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
]
mount {
type = "bind"
target = "/etc/prometheus/config"
source = "local/config"
}
}
template {
data = <<EOF
---
global:
scrape_interval: 30s
evaluation_interval: 3s
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 0.0.0.0:9090
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
services:
- "nomad-client"
- job_name: "consul"
metrics_path: "/v1/agent/metrics"
params:
format:
- "prometheus"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
services:
- "consul"
relabel_configs:
- source_labels: [__meta_consul_address]
replacement: $1:8500
target_label: __address__
- job_name: "exporters"
metrics_path: "/metrics"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
relabel_configs:
- source_labels: [__meta_consul_service]
action: drop
regex: (.+)-sidecar-proxy
- source_labels: [__meta_consul_service_metadata_metrics_addr]
action: keep
regex: (.+)
- source_labels: [__meta_consul_service_metadata_metrics_addr]
target_label: __address__
- source_labels: [__meta_consul_service]
target_label: consul_service
- source_labels: [__meta_consul_node]
target_label: consul_node
- source_labels: [__meta_consul_service_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_consul_service_nomad_node_name]
target_label: nomad_node_name
- job_name: "envoy"
metrics_path: "/metrics"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
relabel_configs:
- source_labels: [__meta_consul_service]
action: keep
regex: (.+)-sidecar-proxy
- source_labels: [__meta_consul_service_metadata_envoy_metrics_addr]
action: keep
regex: (.+)
- source_labels: [__meta_consul_service_metadata_envoy_metrics_addr]
target_label: __address__
- source_labels: [__meta_consul_service]
target_label: consul_service
- source_labels: [__meta_consul_node]
target_label: consul_node
- source_labels: [__meta_consul_service_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_consul_service_nomad_node_name]
target_label: nomad_node_name
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "local/config/prometheus.yml"
}
resources {
cpu = 100
memory = 300
}
}
}
}


@ -1,32 +0,0 @@
job "nomad-client-stalker" {
type = "system"
group "main" {
network {
mode = "host"
port "main" {}
}
service {
name = "nomad-client-stalker"
provider = "nomad"
port = "main"
}
task "main" {
driver = "docker"
config {
image = "busybox"
args = ["tail", "-f", "/dev/null"]
}
resources {
cpu = 10
memory = 15
memory_max = 30
}
}
}
}


@ -1,145 +0,0 @@
job "prometheus" {
datacenters = ["dc1"]
group "prometheus" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9090
}
port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 9091
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "prometheus"
provider = "nomad"
port = "web"
// TODO: Remove traefik tags
tags = [
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
}
service {
name = "pushgateway"
provider = "nomad"
port = "pushgateway"
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.43.0"
ports = ["web"]
args = [
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
]
}
template {
data = <<EOF
---
global:
scrape_interval: 30s
evaluation_interval: 3s
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 127.0.0.1:9090
- job_name: "pushgateway"
honor_labels: true
static_configs:
- targets:
- 127.0.0.1:9091
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
nomad_sd_configs:
# TODO: Use NOMAD_SECRETS_DIR/api.sock and workload identity when
# workload acls can be set using terraform
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_service]
regex: nomad-client-stalker
action: keep
- source_labels: [__meta_nomad_address]
replacement: "$1:4646"
target_label: __address__
- job_name: "nomad_services"
metrics_path: "/metrics"
nomad_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_tags]
regex: .*(prometheus.scrape).*
action: keep
- source_labels: [__meta_nomad_service_address,__meta_nomad_service_port]
separator: ":"
target_label: __address__
- source_labels: [__meta_nomad_service]
target_label: nomad_service
- source_labels: [__meta_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_nomad_node_id]
target_label: nomad_node_id
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml"
}
resources {
cpu = 100
memory = 300
}
}
task "pushgateway" {
driver = "docker"
config {
image = "prom/pushgateway"
ports = ["pushgateway"]
args = [
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence",
]
}
resources {
cpu = 50
memory = 50
}
}
}
}


@ -26,12 +26,8 @@ job "syslogng" {
task "promtail" { task "promtail" {
driver = "docker" driver = "docker"
meta = {
"diun.watch_repo" = true
}
config { config {
image = "grafana/promtail:2.9.1" image = "grafana/promtail:2.2.1"
ports = ["main", "metrics"] ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"] args = ["--config.file=/etc/promtail/promtail.yml"]
@ -87,8 +83,28 @@ EOF
service { service {
name = "syslogng" name = "syslogng"
provider = "nomad"
port = "main" port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 514
upstreams {
destination_name = "syslogng-promtail"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
} }
task "syslogng" { task "syslogng" {


@ -2,20 +2,20 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0" version = "1.4.17"
hashes = [ hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=", "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454", "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605", "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc", "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3", "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88", "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269", "zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3", "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169", "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a", "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69", "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9", "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
] ]
} }


@ -1,3 +1,9 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
job "traefik" { job "traefik" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "service"
@ -14,15 +20,13 @@ job "traefik" {
update { update {
max_parallel = 1 max_parallel = 1
canary = 1 # canary = 1
auto_promote = false # auto_promote = true
auto_revert = true auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
} }
group "traefik" { group "traefik" {
count = 2 count = 1
network { network {
port "web" { port "web" {
@ -36,20 +40,6 @@ job "traefik" {
port "syslog" { port "syslog" {
static = 514 static = 514
} }
port "gitssh" {
static = 2222
}
port "metrics" {}
dns {
servers = [
"192.168.2.101",
"192.168.2.102",
"192.168.2.30",
]
}
} }
ephemeral_disk { ephemeral_disk {
@ -57,42 +47,33 @@ job "traefik" {
sticky = true sticky = true
} }
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
port = "web"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
task "traefik" { task "traefik" {
driver = "docker" driver = "docker"
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
}
config { config {
image = "traefik:3.0" image = "traefik:2.9"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"] ports = ["web", "websecure"]
network_mode = "host" network_mode = "host"
mount { mount {
@ -106,20 +87,6 @@ job "traefik" {
target = "/etc/traefik/usersfile" target = "/etc/traefik/usersfile"
source = "secrets/usersfile" source = "secrets/usersfile"
} }
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
} }
template { template {
@ -142,9 +109,12 @@ job "traefik" {
[entryPoints.websecure] [entryPoints.websecure]
address = ":443" address = ":443"
[entryPoints.websecure.http.tls] [entryPoints.websecure.http.tls]
certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"
[entryPoints.metrics] [entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>" address = ":8989"
[entryPoints.syslogtcp] [entryPoints.syslogtcp]
address = ":514" address = ":514"
@ -152,9 +122,6 @@ job "traefik" {
[entryPoints.syslogudp] [entryPoints.syslogudp]
address = ":514/udp" address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api] [api]
dashboard = true dashboard = true
@ -174,9 +141,29 @@ job "traefik" {
exposedByDefault = false exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)" defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint] [providers.nomad.endpoint]
address = "unix:///secrets/api.sock" address = "http://<< env "attr.unique.network.ip-address" >>:4646"
[certificatesResolvers.letsEncrypt.acme]
email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
# Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
delayBeforeCheck = 0
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml" destination = "local/config/traefik.toml"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
CF_DNS_API_TOKEN={{ .cloudflare_api_token_dns_edit }}
CF_ZONE_API_TOKEN={{ .cloudflare_api_token_zone_read }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
} }
template { template {
@ -188,38 +175,18 @@ job "traefik" {
service = "nomad" service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)" rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
{{ range $service, $url := . }}
[http.routers.{{ $service }}]
entryPoints = ["websecure"]
service = "{{ $service }}"
rule = "Host(`{{ $service }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ end }}
{{- end }}{{ end }}{{ end }}
[http.services] [http.services]
{{ with nomadService "nomad-client" -}}
[http.services.nomad] [http.services.nomad]
[http.services.nomad.loadBalancer] [http.services.nomad.loadBalancer]
{{ range . -}}
[[http.services.nomad.loadBalancer.servers]] [[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646" url = "http://{{ .Address }}:{{ .Port }}"
{{ end }}
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}} {{- end }}
{{ range $service, $url := . }}
[http.services.{{ $service }}]
[http.services.{{ $service }}.loadBalancer]
[[http.services.{{ $service }}.loadBalancer.servers]]
url = "{{ $url }}"
{{ end }}
{{- end }}{{ end }}{{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml" destination = "local/config/conf/route-hashi.toml"
change_mode = "noop" change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
} }
template { template {
@ -238,7 +205,7 @@ job "traefik" {
[[tcp.services.syslogngtcp.loadBalancer.servers]] [[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}" address = "{{ .Address }}:{{ .Port }}"
{{ end -}} {{ end -}}
{{- end }} {{ end }}
{{ with nomadService "syslogng" -}} {{ with nomadService "syslogng" -}}
[udp.routers] [udp.routers]
@ -253,41 +220,9 @@ job "traefik" {
[[udp.services.syslogngudp.loadBalancer.servers]] [[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}" address = "{{ .Address }}:{{ .Port }}"
{{ end -}} {{ end -}}
{{- end }} {{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml" destination = "local/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop" change_mode = "noop"
} }
@ -297,27 +232,29 @@ EOF
{{ with nomadVar "nomad/jobs/traefik" }} {{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }} {{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth] [http.middlewares.basic-auth.basicAuth]
# TODO: Reference secrets mount
usersFile = "/etc/traefik/usersfile" usersFile = "/etc/traefik/usersfile"
{{- end }} {{ end }}
{{- end }} {{ end }}
EOH EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml" destination = "local/config/conf/middlewares.toml"
change_mode = "noop" change_mode = "noop"
} }
template { template {
data = <<EOH data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}} {{ with nomadVar "nomad/jobs/traefik" }}
{{ .usersfile }} {{ .usersfile }}
{{- end }} {{ end }}
EOH EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile" destination = "secrets/usersfile"
change_mode = "noop" change_mode = "noop"
} }
resources { resources {
cpu = 100 cpu = 100
memory = 150 memory = 100
memory_max = 500
} }
} }
} }


@ -1,36 +1,16 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
resource "nomad_job" "traefik" { resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
"base_hostname" = var.base_hostname,
}
}
jobspec = file("${path.module}/traefik.nomad") jobspec = file("${path.module}/traefik.nomad")
} }
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}


@ -3,9 +3,3 @@ variable "base_hostname" {
description = "Base hostname to serve content from" description = "Base hostname to serve content from"
default = "dev.homelab" default = "dev.homelab"
} }
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -1,40 +1,40 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/consul" {
version = "2.0.0" version = "2.15.1"
hashes = [ hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=", "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72", "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199", "zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a", "zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc", "zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf", "zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251", "zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc", "zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599", "zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c", "zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543", "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e", "zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
] ]
} }
provider "registry.terraform.io/hashicorp/random" { provider "registry.terraform.io/hashicorp/nomad" {
version = "3.5.1" version = "1.4.17"
hashes = [ hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", "h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", "zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", "zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", "zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", "zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", "zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", "zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", "zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", "zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", "zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", "zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
] ]
} }

50
databases/adminer.nomad Normal file

@ -0,0 +1,50 @@
job "adminer" {
datacenters = ["dc1"]
type = "service"
group "adminer" {
count = 1
# Some affinity to stateful hosts?
network {
mode = "bridge"
port "adminer" {
to = 8080
}
}
service {
name = "adminer"
port = "adminer"
tags = [
"traefik.enable=true",
"traefik.http.routers.adminer.entryPoints=websecure",
]
}
task "adminer" {
driver = "docker"
config {
image = "adminer"
ports = ["adminer"]
}
template {
data = <<EOF
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
ADMINER_DEFAULT_SERVER={{ .Address }}:{{ .Port }}
{{- end }}
EOF
env = true
destination = "env"
}
resources {
cpu = 50
memory = 50
}
}
}
}


@ -1,249 +0,0 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
priority = 80
group "lldap" {
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "tls" {}
}
service {
name = "lldap"
provider = "nomad"
port = "ldap"
}
service {
name = "lldap-tls"
provider = "nomad"
port = "tls"
}
service {
name = "ldap-admin"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
config {
image = "ghcr.io/lldap/lldap:v0.5"
ports = ["ldap", "web"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"]
}
env = {
"LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
}
template {
data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}}
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options]
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true
{{ end -}}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}"
port = {{ .port }}
tls_required = {{ .tls.Value | toLower }}
user = "{{ .user }}"
{{ end -}}
EOH
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart"
}
resources {
cpu = 10
memory = 200
memory_max = 200
}
}
task "bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/lldap" -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`
CHARACTER SET = 'utf8mb4'
COLLATE = 'utf8mb4_unicode_ci';
DROP USER IF EXISTS '{{ .db_user }}'@'%';
CREATE USER '{{ .db_user }}'@'%'
IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.*
TO '{{ .db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[ldap_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
}
}
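Note: the templates in this job read admin credentials from the Nomad variable at secrets/ldap and SMTP settings from secrets/smtp. A minimal sketch of the expected variable shape, assuming it is managed from Terraform like the PSK variables elsewhere in this diff; every value below is a placeholder, only the keys come from the templates above:

resource "nomad_variable" "secrets_ldap" {
  path = "secrets/ldap"

  items = {
    admin_user     = "admin"              # placeholder
    admin_email    = "admin@example.com"  # placeholder
    admin_password = "changeme"           # placeholder
  }
}

resource "nomad_variable" "secrets_smtp" {
  path = "secrets/smtp"

  items = {
    server   = "smtp.example.com"  # placeholder
    port     = "587"               # placeholder
    tls      = "true"
    user     = "mailer"            # placeholder
    password = "changeme"          # placeholder
  }
}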

View File

@ -1,123 +0,0 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are services dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}
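Note: the random_password + nomad_variable + nomad_acl_policy trio above is the pattern any consumer of the lldap-tls service would repeat. A sketch for a hypothetical downstream job, named "someapp" purely for illustration:

resource "random_password" "someapp_ldap_psk" {
  length           = 32
  override_special = "!@#%&*-_="
}

resource "nomad_variable" "someapp_ldap_psk" {
  path = "secrets/ldap/allowed_psks/someapp"

  items = {
    psk = "someapp:${resource.random_password.someapp_ldap_psk.result}"
  }
}

The lldap stunnel task reads every entry under secrets/ldap/allowed_psks via nomadVarList, so the lldap job itself does not need to change when a new consumer is added.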

View File

@ -17,10 +17,8 @@ job "mysql-server" {
mode = "bridge" mode = "bridge"
port "db" { port "db" {
static = 3306 to = 3306
} }
port "tls" {}
} }
volume "mysql-data" { volume "mysql-data" {
@ -35,12 +33,6 @@ job "mysql-server" {
port = "db" port = "db"
} }
service {
name = "mysql-tls"
provider = "nomad"
port = "tls"
}
task "mysql-server" { task "mysql-server" {
driver = "docker" driver = "docker"
@ -63,56 +55,17 @@ job "mysql-server" {
template { template {
data = <<EOH data = <<EOH
{{ with nomadVar "nomad/jobs/mysql-server" }} {{ with nomadVar "nomad/jobs" }}
MYSQL_ROOT_PASSWORD={{ .mysql_root_password }} MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
{{ end }} {{ end }}
EOH EOH
destination = "${NOMAD_SECRETS_DIR}/db.env" destination = "secrets/db.env"
env = true env = true
} }
resources { resources {
cpu = 300 cpu = 300
memory = 1536 memory = 1024
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:3306
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/mysql/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
} }
} }
} }
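Note: both sides of this diff read mysql_root_password from a Nomad variable; only the path differs (nomad/jobs/mysql-server versus nomad/jobs). A minimal sketch of defining that variable from Terraform, shown here with the job-scoped path and a placeholder value:

resource "nomad_variable" "mysql_server" {
  path = "nomad/jobs/mysql-server"

  items = {
    mysql_root_password = "changeme"  # placeholder
  }
}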

View File

@ -1,41 +1,18 @@
resource "nomad_job" "mysql-server" { resource "nomad_job" "mysql-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/mysql.nomad") jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one # Block until deployed as there are services dependent on this one
detach = false detach = false
} }
resource "nomad_acl_policy" "secrets_mysql" { resource "nomad_job" "adminer" {
name = "secrets-mysql" hcl2 {
description = "Give access to MySQL secrets" enabled = true
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
} }
}
EOH
job_acl { jobspec = file("${path.module}/adminer.nomad")
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
} }

View File

@ -1,120 +0,0 @@
job "postgres-server" {
datacenters = ["dc1"]
type = "service"
priority = 80
group "postgres-server" {
count = 1
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mode = "bridge"
port "db" {
static = 5432
}
port "tls" {}
}
volume "postgres-data" {
type = "host"
read_only = false
source = "postgres-data"
}
service {
name = "postgres-server"
provider = "nomad"
port = "db"
}
service {
name = "postgres-tls"
provider = "nomad"
port = "tls"
}
task "postgres-server" {
driver = "docker"
config {
image = "postgres:14"
ports = ["db"]
}
volume_mount {
volume = "postgres-data"
destination = "/var/lib/postgresql/data"
read_only = false
}
env = {
# Allow connections from any host
"MYSQL_ROOT_HOST" = "%"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/postgres-server" }}
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_pass }}
{{ end }}
EOH
destination = "secrets/db.env"
env = true
}
resources {
cpu = 500
memory = 700
memory_max = 1200
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[postgres_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:5432
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/postgres/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}
}
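Note: this job mounts a host volume named postgres-data, which has to be declared in the Nomad client configuration of whichever node can run it. A sketch of that client stanza, with a placeholder path:

client {
  host_volume "postgres-data" {
    path      = "/srv/volumes/postgres-data"  # placeholder path
    read_only = false
  }
}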

View File

@ -1,41 +0,0 @@
resource "nomad_job" "postgres-server" {
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.postgres-server.id
job_id = "postgres-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "postgres_postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_postgres_psk" {
path = "secrets/postgres/allowed_psks/postgres"
items = {
psk = "postgres:${resource.random_password.postgres_postgres_psk.result}"
}
}

View File

@ -1,7 +1,7 @@
job "redis-${name}" { job "redis" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "service"
priority = 80 priority = 60
group "cache" { group "cache" {
count = 1 count = 1
@ -15,13 +15,22 @@ job "redis-${name}" {
network { network {
mode = "bridge" mode = "bridge"
port "tls" {} port "main" {
to = 6379
}
} }
service { service {
name = "redis-${name}" name = "redis"
provider = "nomad" provider = "nomad"
port = "tls" port = "main"
# check {
# name = "alive"
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
} }
task "redis" { task "redis" {
@ -29,54 +38,14 @@ job "redis-${name}" {
config { config {
image = "redis:6" image = "redis:6"
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "$${NOMAD_ALLOC_DIR}/data"] args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
ports = ["main"] ports = ["main"]
} }
resources { resources {
cpu = 100 cpu = 100
memory = 64 memory = 512
memory_max = 512 memory_max = 1024
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 50
memory = 15
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[redis_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:6379
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/redis-${name}" -}}
{{ .allowed_psks }}
{{- end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
} }
} }
} }

View File

@ -1,12 +1,18 @@
resource "nomad_job" "redis" { resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"]) hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/redis.nomad", jobspec = file("${path.module}/redis.nomad")
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one # Block until deployed as there are services dependent on this one
detach = false detach = false
} }
resource "nomad_job" "rediscommander" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/rediscommander.nomad")
}
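Note: with the for_each form, each key renders its own copy of redis.nomad (job names redis-blocky and redis-authelia), and the stunnel task in each copy loads its PSK list from nomad/jobs/redis-<name>. A sketch of one such variable; the value is a placeholder and the one-"identity:key"-per-line format is assumed from the other PSK variables in this diff:

resource "nomad_variable" "redis_blocky_psks" {
  path = "nomad/jobs/redis-blocky"

  items = {
    # One "identity:key" pair per allowed client; placeholder value.
    allowed_psks = "blocky:changeme"
  }
}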

View File

@ -0,0 +1,50 @@
job "rediscommander" {
datacenters = ["dc1"]
type = "service"
group "rediscommander" {
count = 1
network {
mode = "bridge"
port "main" {
to = 8081
}
}
service {
name = "rediscommander"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.rediscommander.entryPoints=websecure",
]
}
task "rediscommander" {
driver = "docker"
config {
image = "rediscommander/redis-commander:latest"
ports = ["main"]
}
template {
data = <<EOH
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
REDIS_HOSTS=local:{{ .Address }}:{{ .Port }}
{{- end }}
EOH
env = true
destination = "env"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

34
main.tf
View File

@ -1,34 +0,0 @@
module "databases" {
source = "./databases"
use_wesher = var.use_wesher
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}
module "services" {
source = "./services"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# NOTE: It may be possible to flip this and core so core templates don't
# need to be rerendered every time a service goes up or down.
depends_on = [module.databases, module.core]
}
module "backups" {
source = "./backups"
use_wesher = var.use_wesher
depends_on = [module.databases, module.services, module.core]
}
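Note: main.tf only wires the modules together; the root module still needs the variables it forwards. A sketch of the matching root declarations — the use_wesher block mirrors the variables.tf removal above, while the base_hostname description and default here are placeholders:

variable "base_hostname" {
  type        = string
  description = "Base hostname for services"  # placeholder description
  default     = "example.com"                 # placeholder
}

variable "use_wesher" {
  type        = bool
  description = "Indicates whether or not services should expose themselves on the wesher network"
  default     = true
}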

Some files were not shown because too many files have changed in this diff.