Compare commits

..

2 Commits

Author SHA1 Message Date
852c44d435 Update Consul ACL backend 2022-07-27 13:39:19 -07:00
0fbc1c716b WIP: Begin config to bootstrap ACLs
Following guide here: https://learn.hashicorp.com/tutorials/consul/vault-consul-secrets?in=consul/vault-secure

Unsure of how this will actually authenticate though.
2022-07-27 13:13:22 -07:00
193 changed files with 11162 additions and 16423 deletions

51
.gitignore vendored

@ -1,53 +1,8 @@
# ---> Terraform
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# ---> Ansible
*.retry
ansible_galaxy/ansible_collections/
ansible_galaxy/roles/
# Repo specific
roles/
venv/
ca/
# Non-public bootstrap values
vault-keys.json
nomad_bootstrap.json
ca/
collections/ansible_collections/
consul_values.yml
vault_hashi_vault_values.yml
vault_*.yml
ansible_playbooks/vars/nomad_vars.yml


@ -1,34 +0,0 @@
---
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.76.0
hooks:
- id: terraform_fmt
- id: terraform_validate
args:
- --tf-init-args=-lockfile=readonly
- id: terraform_tflint
args:
- --args=--config=__GIT_WORKING_DIR__/.tflint.hcl
- id: terraform_tfsec
# - id: terraform_providers_lock
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: check-added-large-files
- id: check-merge-conflict
- id: end-of-file-fixer
exclude: "^ansible_playbooks/vars/nomad_vars.sample.yml$"
- id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets
rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets-baseline']
- repo: local
hooks:
- id: variable-sample
name: generate variable sample file
language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file]


@ -1,191 +0,0 @@
{
"version": "1.4.0",
"plugins_used": [
{
"name": "ArtifactoryDetector"
},
{
"name": "AWSKeyDetector"
},
{
"name": "AzureStorageKeyDetector"
},
{
"name": "Base64HighEntropyString",
"limit": 4.5
},
{
"name": "BasicAuthDetector"
},
{
"name": "CloudantDetector"
},
{
"name": "DiscordBotTokenDetector"
},
{
"name": "GitHubTokenDetector"
},
{
"name": "HexHighEntropyString",
"limit": 3.0
},
{
"name": "IbmCloudIamDetector"
},
{
"name": "IbmCosHmacDetector"
},
{
"name": "JwtTokenDetector"
},
{
"name": "KeywordDetector",
"keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
{
"name": "NpmDetector"
},
{
"name": "PrivateKeyDetector"
},
{
"name": "SendGridDetector"
},
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
{
"name": "SquareOAuthDetector"
},
{
"name": "StripeDetector"
},
{
"name": "TwilioKeyDetector"
}
],
"filters_used": [
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_baseline_file",
"filename": ".secrets-baseline"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
},
{
"path": "detect_secrets.filters.heuristic.is_indirect_reference"
},
{
"path": "detect_secrets.filters.heuristic.is_likely_id_string"
},
{
"path": "detect_secrets.filters.heuristic.is_lock_file"
},
{
"path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
},
{
"path": "detect_secrets.filters.heuristic.is_potential_uuid"
},
{
"path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
},
{
"path": "detect_secrets.filters.heuristic.is_sequential_string"
},
{
"path": "detect_secrets.filters.heuristic.is_swagger_file"
},
{
"path": "detect_secrets.filters.heuristic.is_templated_secret"
},
{
"path": "detect_secrets.filters.regex.should_exclude_secret",
"pattern": [
"(\\${.*}|from_env|fake|!secret|VALUE)"
]
}
],
"results": {
"core/authelia.yml": [
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04",
"is_verified": false,
"line_number": 54,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false,
"line_number": 201,
"is_secret": false
}
],
"core/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252,
"is_secret": false
}
]
},
"generated_at": "2024-08-30T18:12:43Z"
}

97
.terraform.lock.hcl generated

@ -1,40 +1,79 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.2.0"
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
"zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
"zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
"zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
"zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
"zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
"zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
"zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
"zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
"zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
"zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.8.0"
constraints = "3.8.0"
hashes = [
"h1:F+1vJ14D9nNx3sNrCbKxvpJZ+QnVmD1p/ITbYPlkRg4=",
"zh:2c807352fd061f31d2972f131b74ab2e2c47031760a9f18b6f4b4a699d384969",
"zh:3c5d6334c367c41d570f0eb226be0dfbdb31034669b8914b509f145a279c2bfa",
"zh:4ce3887e53cc9536bfd500fac09caaab93084ed145532a521826a5093e7f8dd7",
"zh:6990eac4216fb8d7fcbe0a483cc1c6a077d0e970db84fb1c0b9032158b555c0e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:939576f814ee4406131bdd3564cee041b05176d2e0a0b55e8081019348125e76",
"zh:a0545395bd6039f7c9998113ada4334717eb1c74fee4ece7da1d4f3e6d5ef7ba",
"zh:a086e5e4fdadcb0492f48074047954cc6c437b9ee57d9ec7ba850fb7cb5455a8",
"zh:c997156a7c23fa06304d7e22cfd64407e9ed69237c5780d20026521ce2be478d",
"zh:d47ad773cf50d703450cf301872cbc33938712a5ae491dfebf77611e1bcb0237",
"zh:d95de02ccc23416e2eefb689c94046a5dcb4c65ab96cebc61838c5b1ef70e1d3",
"zh:f166c7ed64c12978c4296d477ca508df82791648e6e9ff523268c1d361493851",
]
}


@ -1,7 +0,0 @@
rule "terraform_required_version" {
enabled = false
}
rule "terraform_required_providers" {
enabled = false
}

144
Makefile

@ -1,76 +1,79 @@
SLEEP_FOR ?= 10
VENV ?= venv
SERVER ?= "192.168.2.41"
SSH_USER = iamthefij
SSH_KEY = ~/.ssh/id_ed25519
.PHONY: sleep
sleep:
sleep $(SLEEP_FOR)
.PHONY: rm-nomad
rm-nomad:
hashi-up nomad uninstall \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: default
default: check
.PHONY: nomad-up
nomad-up:
hashi-up nomad install \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--server --client
hashi-up nomad start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: all
all: cluster bootstrap-values apply
.PHONY: rm-consul
rm-consul:
hashi-up consul uninstall \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: consul-up
consul-up:
hashi-up consul install \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--advertise-addr $(SERVER) \
--client-addr 0.0.0.0 \
--http-addr 0.0.0.0 \
--connect \
--server
hashi-up consul start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: cluster
cluster: ansible-cluster
# Ensures virtualenv is present
$(VENV):
python3 -m venv $(VENV)
$(VENV)/bin/pip install -r requirements.txt
venv/bin/ansible:
python3 -m venv venv
./venv/bin/pip install ansible python-consul hvac
# Installs pre-commit hooks
.PHONY: install-hooks
install-hooks: $(VENV)
$(VENV)/bin/pre-commit install --install-hooks
.PHONY: galaxy
galaxy: venv/bin/ansible
./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
./venv/bin/ansible-galaxy collection install -r collections/requirements.yml
# Checks files for encryption
.PHONY: check
check: $(VENV)
$(VENV)/bin/pre-commit run --all-files
# Creates a new secrets baseline
.secrets-baseline: $(VENV) Makefile
$(VENV)/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret|VALUE)' > .secrets-baseline
# Audits secrets against baseline
.PHONY: secrets-audit
secrets-audit: $(VENV) .secrets-baseline
$(VENV)/bin/detect-secrets audit .secrets-baseline
# Updates secrets baseline
.PHONY: secrets-update
secrets-update: $(VENV) .secrets-baseline
$(VENV)/bin/detect-secrets scan --baseline .secrets-baseline
.PHONY: ansible_galaxy
ansible_galaxy: ansible_galaxy/ansible_collections ansible_galaxy/roles
ansible_galaxy/ansible_collections: $(VENV) ./ansible_galaxy/requirements.yml
$(VENV)/bin/ansible-galaxy collection install -p ./ansible_galaxy -r ./ansible_galaxy/requirements.yml
ansible_galaxy/roles: $(VENV) ./ansible_galaxy/requirements.yml
$(VENV)/bin/ansible-galaxy install -p ./ansible_galaxy/roles -r ./ansible_galaxy/requirements.yml
.PHONY: ansible-cluster
ansible-cluster: $(VENV) ansible_galaxy
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -K -vv \
ansible-cluster: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
./ansible_playbooks/setup-cluster.yml
-i ansible_hosts.yml -M ./roles ./setup-cluster.yml
.PHONY: bootstrap-values
bootstrap-values: $(VENV)
env NOMAD_ADDR=http://192.168.2.101:4646 \
NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \
$(VENV)/bin/python ./scripts/nomad_vars.py
.PHONY: recover-nomad
recover-nomad: $(VENV)
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/recover-nomad.yaml
.PHONY: stop-cluster
stop-cluster: $(VENV)
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/stop-cluster.yml
bootstrap-values: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -vv \
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml
.PHONY: init
init:
@ -80,26 +83,15 @@ init:
plan:
@terraform plan \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: apply
apply:
@terraform apply \
-auto-approve \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: refresh
refresh:
@terraform refresh \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: destroy
destroy:
@terraform destroy \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: clean
clean:
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \
./ansible_playbooks/clear-data.yml
find -name "*.tfstate" -exec rm '{}' \;
rm -f ./vault-keys.json ./nomad_bootstrap.json
# Install CNI on hosts?
# curl -L -o cni-plugins.tgz "https://github.com/containernetworking/plugins/releases/download/v1.0.0/cni-plugins-linux-$( [ $(uname -m) = aarch64 ] && echo arm64 || echo amd64)"-v1.0.0.tgz
# sudo mkdir -p /opt/cni/bin
# sudo tar -C /opt/cni/bin -xzf cni-plugins.tgz


@ -1,46 +0,0 @@
# Homelab Nomad
My configuration for creating my home Nomad cluster and deploying services to it.
This repo is not designed as a general-purpose template, but rather to fit my specific needs. That said, I have made an effort to keep things as useful as possible for someone wanting to use or modify this.
## Running
make all
## Design
Both Ansible and Terraform are used as part of this configuration. All hosts must be reachable over SSH before any of it is run.
To begin, Ansible runs a playbook to set up the cluster. This includes installing Nomad, bootstrapping the cluster and ACLs, setting up NFS shares, creating Nomad Host Volumes, and setting up Wesher as a WireGuard mesh between hosts.
After this is complete, Nomad variables must be set so that services can be accessed and configured correctly. The values to set are based on the sample file.
Finally, the Terraform configuration can be applied, setting up all services deployed to the cluster.
The configuration of new services is intended to be as templated as possible and to avoid requiring changes in multiple places. For example, most services are configured with a template that provides reverse proxy, DNS records, database tunnels, database bootstrapping, metrics scraping, and authentication (a purely illustrative sketch follows below). The only real exception is backups, which requires a distinct job file for now.
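The shared service template itself is not part of this diff, so the following is only a hypothetical sketch of what such a templated service definition could look like; the module name, path, and every input are illustrative assumptions, not the repo's actual interface.

```hcl
# Hypothetical example only: illustrates the "templated service" idea above.
# The real module and its inputs are not shown in this diff.
module "example_service" {
  source = "./service" # assumed path to a shared service template module

  name  = "example"
  image = "nginx:1.25"

  # Toggles like these would let the template wire up the reverse proxy, DNS
  # records, database bootstrapping, metrics scraping, and authentication.
  ingress     = true
  metrics     = true
  mysql       = true
  oidc_client = true
}
```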
## What does it do?
* Nomad cluster for scheduling and configuring all services
* Blocky DNS servers with integrated ad blocking. This also provides service discovery
* Prometheus with autodiscovery of service metrics
* Loki and Promtail aggregating logs
* Minitor for service availability checks
* Grafana providing dashboards, alerting, and log searching
* Photoprism for photo management
* Remote and shared volumes over NFS
* Authelia for OIDC and Proxy based authentication with 2FA
* Sonarr and Lidarr for multimedia management
* Automated block based backups using Restic
## Step by step
1. Update hosts in `ansible_playbooks/ansible_hosts.yml`
2. Update `ansible_playbooks/setup-cluster.yml`
1. Update backup DNS server
2. Update NFS shares from NAS
3. Update volumes to make sure they are valid paths
3. Create `ansible_playbooks/vars/nomad_vars.yml` based on the sample file. TODO: This is quite specific and probably impossible without more documentation
4. Run `make all`
5. Update your network DNS settings to use the new servers' IP addresses


@ -1,21 +1,60 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.20"
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.8.0"
constraints = "3.8.0"
hashes = [
"h1:F+1vJ14D9nNx3sNrCbKxvpJZ+QnVmD1p/ITbYPlkRg4=",
"zh:2c807352fd061f31d2972f131b74ab2e2c47031760a9f18b6f4b4a699d384969",
"zh:3c5d6334c367c41d570f0eb226be0dfbdb31034669b8914b509f145a279c2bfa",
"zh:4ce3887e53cc9536bfd500fac09caaab93084ed145532a521826a5093e7f8dd7",
"zh:6990eac4216fb8d7fcbe0a483cc1c6a077d0e970db84fb1c0b9032158b555c0e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:939576f814ee4406131bdd3564cee041b05176d2e0a0b55e8081019348125e76",
"zh:a0545395bd6039f7c9998113ada4334717eb1c74fee4ece7da1d4f3e6d5ef7ba",
"zh:a086e5e4fdadcb0492f48074047954cc6c437b9ee57d9ec7ba850fb7cb5455a8",
"zh:c997156a7c23fa06304d7e22cfd64407e9ed69237c5780d20026521ce2be478d",
"zh:d47ad773cf50d703450cf301872cbc33938712a5ae491dfebf77611e1bcb0237",
"zh:d95de02ccc23416e2eefb689c94046a5dcb4c65ab96cebc61838c5b1ef70e1d3",
"zh:f166c7ed64c12978c4296d477ca508df82791648e6e9ff523268c1d361493851",
]
}

15
acls/consul_policies.tf Normal file

@ -0,0 +1,15 @@
resource "consul_acl_policy" "server_policy" {
name = "consul-servers"
rules = <<EOH
node_prefix "server-" {
policy = "write"
}
node_prefix "" {
policy = "read"
}
service_prefix "" {
policy = "read"
}
EOH
}
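The policy above only defines permissions; a Consul server agent would still need to run with a token that carries it. A minimal sketch of the matching agent ACL stanza (not part of this diff; values are assumptions, and the actual token would come from the Vault role configured below):

```hcl
# Sketch of a Consul server agent ACL block using a token that carries the
# "consul-servers" policy defined above.
acl {
  enabled        = true
  default_policy = "deny"
  tokens {
    # Placeholder: in practice this would be a token issued via Vault's
    # Consul secrets backend rather than a hard-coded value.
    agent = "REPLACE_WITH_ISSUED_TOKEN"
  }
}
```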

20
acls/consul_vault.tf Normal file

@ -0,0 +1,20 @@
resource "vault_consul_secret_backend" "config" {
path = "consul"
description = "Manages the Consul backend"
address = "http://127.0.0.1:8300"
# Using the root token here; do Consul tokens expire?
token = var.consul_token
}
resource "vault_consul_secret_backend_role" "consul_servers" {
name = "consul-servers"
backend = vault_consul_secret_backend.config.path
consul_policies = [
"consul-servers"
]
max_ttl = 240
ttl = 120
}
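On the question in the comment above: Consul tokens issued through this backend are tied to a Vault lease (here ttl 120s, max_ttl 240s) and are deleted from Consul when the lease expires, while the long-lived token handed to the backend itself is not rotated by default. A minimal sketch of how a consumer might request a token from the role above (not part of this diff; note that every read mints a new token, so a plan-time data source is shown only for illustration):

```hcl
# Sketch only: read a short-lived Consul token generated by the
# "consul-servers" role defined above (equivalent to
# `vault read consul/creds/consul-servers`).
data "vault_generic_secret" "consul_server_token" {
  path = "${vault_consul_secret_backend.config.path}/creds/${vault_consul_secret_backend_role.consul_servers.name}"
}

# The generated token is expected under the "token" key of the response.
output "consul_server_token" {
  value     = data.vault_generic_secret.consul_server_token.data["token"]
  sensitive = true
}
```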


@ -1,12 +1,6 @@
namespace "*" {
policy = "write"
capabilities = ["alloc-node-exec"]
variables {
path "*" {
capabilities = ["write", "read", "destroy"]
}
}
}
agent {


@ -1,5 +1,5 @@
namespace "*" {
policy = "read"
policy = "read"
}
agent {


@ -1,7 +1,7 @@
resource "nomad_acl_policy" "anon_policy" {
name = "anonymous"
description = "Anon read only"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
description = "Anon RO"
rules_hcl = file("${path.module}/nomad-anon-bootstrap.hcl")
}
resource "nomad_acl_policy" "admin" {
@ -10,9 +10,9 @@ resource "nomad_acl_policy" "admin" {
rules_hcl = file("${path.module}/nomad-admin-policy.hcl")
}
# TODO: (security) Limit this scope
# TODO: Limit this scope
resource "nomad_acl_policy" "deploy" {
name = "deploy"
description = "Write for job deployments"
description = "Admin RW"
rules_hcl = file("${path.module}/nomad-deploy-policy.hcl")
}


@ -1,17 +0,0 @@
resource "nomad_acl_role" "admin" {
name = "admin"
description = "Nomad administrators"
policy {
name = nomad_acl_policy.admin.name
}
}
resource "nomad_acl_role" "deploy" {
name = "deploy"
description = "Authorized to conduct deployments and view logs"
policy {
name = nomad_acl_policy.deploy.name
}
}

47
acls/nomad_vault.tf Normal file

@ -0,0 +1,47 @@
# Set up nomad provider in vault for Nomad ACLs
resource "nomad_acl_token" "vault" {
name = "vault"
type = "management"
}
resource "vault_nomad_secret_backend" "config" {
backend = "nomad"
description = "Nomad ACL"
token = nomad_acl_token.vault.secret_id
default_lease_ttl_seconds = "3600"
max_lease_ttl_seconds = "7200"
max_ttl = "240"
ttl = "120"
}
# Vault roles generating Nomad tokens
resource "vault_nomad_secret_role" "nomad-deploy" {
backend = vault_nomad_secret_backend.config.backend
role = "nomad-deploy"
# Nomad policies
policies = ["deploy"]
}
resource "vault_nomad_secret_role" "admin-management" {
backend = vault_nomad_secret_backend.config.backend
role = "admin-management"
type = "management"
}
resource "vault_nomad_secret_role" "admin" {
backend = vault_nomad_secret_backend.config.backend
role = "admin"
# Nomad policies
policies = ["admin"]
}
# Nomad Vault token access
resource "vault_token_auth_backend_role" "nomad-cluster" {
role_name = "nomad-cluster"
token_explicit_max_ttl = 0
allowed_policies = ["access-tables", "nomad-task"]
orphan = true
token_period = 259200
renewable = true
}
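A sketch (not part of this diff) of how a deploy workflow might consume the roles above; the creds path matches the one granted by the nomad-deploy policy in vault_policies.tf, and the "secret_id" key is an assumption about the Nomad secrets engine response:

```hcl
# Sketch only: pull a short-lived Nomad token from the "nomad-deploy" role.
data "vault_generic_secret" "nomad_deploy" {
  path = "${vault_nomad_secret_backend.config.backend}/creds/${vault_nomad_secret_role.nomad-deploy.role}"
}

# Hypothetical aliased provider that deploys jobs with the issued token.
provider "nomad" {
  alias     = "deploy"
  address   = local.nomad_node_address
  secret_id = data.vault_generic_secret.nomad_deploy.data["secret_id"]
}
```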

17
acls/nomad_vault_db.tf Normal file

@ -0,0 +1,17 @@
# resource "vault_mount" "db" {
# path = "database"
# type = "database"
# }
#
# resource "vault_database_secret_backend_connection" "mysql" {
# backend = vault_mount.db.path
# name = "mysql"
# allowed_roles = ["accessdb"]
#
# mysql {
# # How to give access here?
# connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
# username = ""
# password = ""
# }
# }
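On the "how to give access" question above: the usual pattern is a database secret backend role whose creation statements grant the dynamic user its privileges. A hypothetical completion, kept commented out like the rest of the file:

```hcl
# Hypothetical sketch only: a role mapping Vault leases to MySQL users.
# resource "vault_database_secret_backend_role" "accessdb" {
#   backend = vault_mount.db.path
#   name    = "accessdb"
#   db_name = vault_database_secret_backend_connection.mysql.name
#
#   creation_statements = [
#     "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';",
#     "GRANT SELECT ON *.* TO '{{name}}'@'%';",
#   ]
# }
```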


@ -1,6 +1,47 @@
terraform {
required_providers {
vault = {
source = "hashicorp/vault"
version = "3.8.0"
}
}
}
# Configure Consul provider
provider "consul" {
address = var.consul_address
}
# Get Nomad client from Consul
data "consul_service" "nomad" {
name = "nomad-client"
}
# Get Vault client from Consul
data "consul_service" "vault" {
name = "vault"
tag = "active"
}
locals {
# Get Nomad address from Consul
nomad_node = data.consul_service.nomad.service[0]
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
# Get Vault address from Consul
vault_node = data.consul_service.vault.service[0]
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
# Configure the Nomad provider
provider "nomad" {
address = var.nomad_address
address = local.nomad_node_address
secret_id = var.nomad_secret_id
region = "global"
}
# Configure the Vault provider
provider "vault" {
address = local.vault_node_address
token = var.vault_token
}


@ -1,11 +1,21 @@
variable "consul_address" {
type = string
default = "http://n1.thefij:8500"
}
variable "consul_token" {
type = string
description = "Token for setting up consul"
sensitive = true
}
variable "nomad_secret_id" {
type = string
description = "Secret ID for ACL bootstrapped Nomad"
sensitive = true
default = ""
}
variable "nomad_address" {
type = string
default = "http://n1.thefij:4646"
variable "vault_token" {
type = string
sensitive = true
}

8
acls/vault_login.tf Normal file

@ -0,0 +1,8 @@
resource "vault_auth_backend" "userpass" {
type = "userpass"
tune {
max_lease_ttl = "1h"
listing_visibility = "unauth"
}
}
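The userpass backend above has no users yet. A hedged sketch of how one admin user could be added through Terraform (the resource choice, username, and password variable are assumptions, not part of this diff):

```hcl
# Sketch only: create an "admin" userpass user attached to the "admin"
# policy defined in vault_policies.tf.
resource "vault_generic_endpoint" "admin_user" {
  depends_on           = [vault_auth_backend.userpass]
  path                 = "auth/userpass/users/admin"
  ignore_absent_fields = true

  data_json = jsonencode({
    policies = ["admin"]
    password = var.admin_password # hypothetical variable, not in this diff
  })
}
```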

83
acls/vault_policies.tf Normal file

@ -0,0 +1,83 @@
resource "vault_policy" "admin" {
name = "admin"
policy = <<EOF
path "*" {
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
EOF
}
resource "vault_policy" "nomad-deploy" {
name = "nomad-deploy"
policy = <<EOH
path "nomad/creds/nomad-deploy" {
capabilities = ["read"]
}
EOH
}
# Policy for clusters
resource "vault_policy" "nomad-task" {
name = "nomad-task"
policy = <<EOH
path "kv/data/*" {
# Does this need create, update, delete?
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
# Policy for nomad tokens
resource "vault_policy" "nomad-server" {
name = "nomad-server"
policy = <<EOH
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up the token passed to Nomad to validate that the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
# This section grants broad access on "kv/data/*". Further restrictions can be
# applied to this policy if needed.
path "kv/data/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
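For context, this is the policy normally attached to the Vault token handed to Nomad agents; the agent side (not part of this diff, address assumed) would reference the "nomad-cluster" token role created in nomad_vault.tf:

```hcl
# Sketch of the Nomad agent "vault" stanza pairing with the policy above.
vault {
  enabled          = true
  address          = "http://active.vault.service.consul:8200" # assumed address
  create_from_role = "nomad-cluster"
  # token: a Vault token holding the "nomad-server" policy, supplied at startup
}
```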


@ -1,7 +0,0 @@
[defaults]
inventory=ansible_playbooks/ansible_hosts.yml
collections_paths=ansible_galaxy
roles_path=ansible_galaxy/roles
[inventory]
enable_plugins=yaml

49
ansible_hosts.yml Normal file

@ -0,0 +1,49 @@
---
all:
children:
servers:
hosts:
n1.thefij:
# consul_node_role: bootstrap
nomad_node_role: both
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "root"
group: "bin"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_role: both
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: gitea-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: authentik-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
n3.thefij:
nomad_node_class: ingress
nomad_node_role: both
consul_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}


@ -1,72 +0,0 @@
---
all:
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
# n2.thefij:
# nomad_node_class: ingress
# nomad_reserved_memory: 1024
# nfs_mounts:
# - src: 10.50.250.2:/srv/volumes
# path: /srv/volumes/moxy
# opts: proto=tcp,rw
# nomad_unique_host_volumes:
# - name: nextcloud-data
# path: /srv/volumes/nextcloud
# owner: "root"
# group: "bin"
# mode: "0755"
# read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances:
vars:
nomad_network_interface: eth0
children:
nomad_servers: {}
nomad_clients: {}
nomad_servers:
hosts:
nonopi.thefij:
ansible_host: 192.168.2.170
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}
nomad_clients:
hosts:
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}


@ -1,43 +0,0 @@
# Stops Nomad and clears all data from its data dirs
---
- name: Delete Nomad data
hosts: nomad_instances
tasks:
- name: Stop nomad
systemd:
name: nomad
state: stopped
become: true
- name: Kill nomad
shell:
cmd: systemctl kill nomad
become: true
- name: Stop all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
become: true
- name: Remove all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
become: true
- name: Unmount secrets
shell:
cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
become: true
- name: Remove data dir
file:
path: /var/nomad
state: absent
become: true
- name: Remove data dir
file:
path: /opt/nomad/data
state: absent
become: true


@ -1,27 +0,0 @@
- name: Restart nomad and wesher
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Restart wesher
systemd:
name: wesher
state: restarted
become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad
systemd:
name: nomad
state: started
become: true


@ -1,49 +0,0 @@
---
- name: Recover Nomad
hosts: nomad_servers
any_errors_fatal: true
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id
register: nomad_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ nomad_node_id.content | b64decode }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: /var/nomad/server/raft/peers.json
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].nomad_node_id.content | b64decode }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Nomad
systemd:
name: nomad
state: restarted
become: true


@ -1,380 +0,0 @@
---
- name: Update DNS for bootstrapping with non-Nomad host
hosts: nomad_instances
become: true
gather_facts: false
vars:
non_nomad_dns: 192.168.2.170
tasks:
- name: Add non-nomad bootstrap DNS
lineinfile:
dest: /etc/resolv.conf
create: true
line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker
hosts: nomad_clients
become: true
vars:
docker_architecture_map:
x86_64: amd64
armv7l: armhf
aarch64: arm64
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
tasks:
- name: Remove snapd
package:
name: snapd
state: absent
# Not on Ubuntu 20.04
# - name: Install Podman
# hosts: nomad_instances
# become: true
#
# tasks:
# - name: Install Podman
# package:
# name: podman
# state: present
- name: Create NFS mounts
hosts: nomad_clients
become: true
vars:
shared_nfs_mounts:
- src: 192.168.2.10:/Media
path: /srv/volumes/media-read
opts: proto=tcp,port=2049,ro
- src: 192.168.2.10:/Media
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Overflow
path: /srv/volumes/nas-overflow
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Container
path: /srv/volumes/nas-container
opts: proto=tcp,port=2049,rw
tasks:
- name: Install nfs
package:
name: nfs-common
state: present
- name: Mount NFS volumes
ansible.posix.mount:
src: "{{ item.src }}"
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs4
loop: "{{ shared_nfs_mounts + (nfs_mounts | default([])) }}"
- import_playbook: wesher.yml
- name: Build Nomad cluster
hosts: nomad_instances
any_errors_fatal: true
become: true
vars:
shared_host_volumes:
- name: media-read
path: /srv/volumes/media-write
read_only: true
- name: media-write
path: /srv/volumes/media-write
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-overflow-write
path: /srv/volumes/nas-overflow/Media
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads
path: /srv/volumes/media-write/Downloads
read_only: false
- name: sabnzbd-config
path: /srv/volumes/media-write/Downloads/sabnzbd
read_only: false
- name: photoprism-media
path: /srv/volumes/photos/Photoprism
read_only: false
- name: photoprism-storage
path: /srv/volumes/nas-container/photoprism
read_only: false
- name: nzbget-config
path: /srv/volumes/nas-container/nzbget
read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config
path: /srv/volumes/nas-container/lidarr
read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: christmas-community
path: /srv/volumes/nas-container/christmas-community
read_only: false
- name: all-volumes
path: /srv/volumes
owner: "root"
group: "root"
mode: "0755"
read_only: false
roles:
- name: ansible-nomad
vars:
nomad_version: "1.9.3-1"
nomad_install_upgrade: true
nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to
nomad_bin_dir: /usr/bin
nomad_install_from_repo: true
nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
nomad_raft_protocol: 3
nomad_autopilot: true
nomad_encrypt_enable: true
# nomad_use_consul: true
# Metrics
nomad_telemetry: true
nomad_telemetry_prometheus_metrics: true
nomad_telemetry_publish_allocation_metrics: true
nomad_telemetry_publish_node_metrics: true
# Enable container plugins
nomad_cni_enable: true
nomad_cni_version: 1.0.1
nomad_docker_enable: true
nomad_docker_dmsetup: false
# nomad_podman_enable: true
# Merge shared host volumes with node volumes
nomad_host_volumes: "{{ shared_host_volumes + (nomad_unique_host_volumes | default([])) }}"
# Customize docker plugin
nomad_plugins:
docker:
config:
allow_privileged: true
gc:
image_delay: "24h"
volumes:
enabled: true
selinuxlabel: "z"
# Send logs to journald so we can scrape them for Loki
# logging:
# type: journald
extra_labels:
- "job_name"
- "job_id"
- "task_group_name"
- "task_name"
- "namespace"
- "node_name"
- "node_id"
# Bind nomad
nomad_bind_address: 0.0.0.0
# Default interface for binding tasks
# This is now set at the inventory level
# nomad_network_interface: eth0
# Create networks for binding task ports
nomad_host_networks:
- name: loopback
interface: lo
reserved_ports: "22"
- name: wesher
interface: wgoverlay
reserved_ports: "22"
# Enable ACLs
nomad_acl_enabled: true
nomad_config_custom:
ui:
enabled: true
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_servers
tasks:
- name: Start Nomad
systemd:
state: started
name: nomad
- name: Nomad API reachable?
uri:
url: "http://127.0.0.1:4646/v1/status/leader"
method: GET
status_code: 200
register: nomad_check_result
retries: 8
until: nomad_check_result is succeeded
delay: 15
changed_when: false
run_once: true
- name: Bootstrap ACLs
command:
argv:
- "nomad"
- "acl"
- "bootstrap"
- "-json"
run_once: true
ignore_errors: true
register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result
copy:
content: "{{ bootstrap_result.stdout }}"
dest: "../nomad_bootstrap.json"
when: bootstrap_result is succeeded
delegate_to: localhost
run_once: true
- name: Read secret
command:
argv:
- jq
- -r
- .SecretID
- ../nomad_bootstrap.json
delegate_to: localhost
run_once: true
no_log: true
changed_when: false
register: read_secretid
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
register: policies
run_once: true
changed_when: false
- name: Copy policy
copy:
src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true
register: anon_policy
- name: Create anon-policy
command:
argv:
- nomad
- acl
- policy
- apply
- -description=Anon read only
- anonymous
- /tmp/anonymous.policy.hcl
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
when: policies.stdout == "No policies found" or anon_policy.changed
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault
# community.general.terraform:
# project_path: ../acls
# force_init: true
# variables:
# consul_address: "{{ play_hosts[0] }}:8500"
# vault_token: "{{ root_token }}"
# nomad_secret_id: "{{ read_secretid.stdout }}"
# delegate_to: localhost
# run_once: true
# notify:
# - Restart Nomad
handlers:
- name: Restart Nomad
systemd:
state: restarted
name: nomad
retries: 6
delay: 5


@ -1,10 +0,0 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true


@ -1,162 +0,0 @@
nomad/jobs:
base_hostname: VALUE
db_user_ro: VALUE
ldap_base_dn: VALUE
notify_email: VALUE
nomad/jobs/authelia:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
email_sender: VALUE
jwt_secret: VALUE
oidc_clients: VALUE
oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE
oidc_issuer_private_key: VALUE
session_secret: VALUE
storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/backup:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/blocky:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
mappings: VALUE
whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/ddclient:
domain: VALUE
domain_ddclient: VALUE
zone: VALUE
nomad/jobs/diun:
slack_hook_url: VALUE
nomad/jobs/git:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
secret_key: VALUE
smtp_sender: VALUE
nomad/jobs/grafana:
admin_pw: VALUE
alert_email_addresses: VALUE
db_name: VALUE
db_pass: VALUE
db_pass_ro: VALUE
db_user: VALUE
db_user_ro: VALUE
minio_access_key: VALUE
minio_secret_key: VALUE
oidc_secret: VALUE
slack_bot_token: VALUE
slack_bot_url: VALUE
slack_hook_url: VALUE
smtp_password: VALUE
smtp_user: VALUE
nomad/jobs/immich:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lego:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/lidarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lldap:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
jwt_secret: VALUE
key_seed: VALUE
smtp_from: VALUE
smtp_reply_to: VALUE
nomad/jobs/minitor:
mailgun_api_key: VALUE
nomad/jobs/mysql-server:
mysql_root_password: VALUE
nomad/jobs/photoprism:
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
nomad/jobs/postgres-server:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/radarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia:
allowed_psks: VALUE
nomad/jobs/redis-blocky:
allowed_psks: VALUE
nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE
nomad/jobs/sonarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/traefik:
external: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/jobs/wishlist:
guest_password: VALUE
nomad/oidc:
secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql:
mysql_root_password: VALUE
secrets/postgres:
superuser: VALUE
superuser_pass: VALUE
secrets/smtp:
password: VALUE
port: VALUE
server: VALUE
tls: VALUE
user: VALUE


@ -1,2 +0,0 @@
---
wesher_key: "{{ vault_wesher_key }}"


@ -1,53 +0,0 @@
- name: Create overlay network
hosts: nomad_instances
become: true
vars_files:
- vars/wesher_vars.yml
- vars/vault_wesher_vars.yml
vars:
wesher_key: "{{ wesher_key }}"
wesher_version: v0.2.6
wesher_arch_map:
x86_64: amd64
armv7l: arm
aarch64: arm64
wesher_arch: "{{ wesher_arch_map[ansible_architecture] }}"
# wesher_sha256_map:
# x86_64: 8c551ca211d7809246444765b5552a8d1742420c64eff5677d1e27a34c72aeef
# armv7l: 97f5bbf2b00b8b11a4ca224540bf9c1affdb15432c3b6ad8da4c1a7b6175eb5d
# aarch64: 507c6397d67ea90bddb3e1c06ec9d8e38d4342ed6f0f6b47855fecc9f1d6fae0
# wesher_checksum: sha256:{{ wesher_sha256_map[ansible_architecture] }}
wesher_checksum: sha256:https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher.sha256sums
tasks:
- name: Download wesher
get_url:
url: https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher-{{ wesher_arch }}
dest: /usr/local/sbin/wesher
checksum: "{{ wesher_checksum }}"
owner: root
mode: "0755"
- name: Install systemd unit
get_url:
url: https://github.com/costela/wesher/raw/{{ wesher_version }}/dist/wesher.service
dest: /etc/systemd/system/wesher.service
- name: Write wesher config
lineinfile:
path: /etc/default/wesher
create: true
regexp: "^{{ item.split('=')[0] }}"
line: "{{ item }}"
owner: root
mode: "0600"
loop:
- WESHER_CLUSTER_KEY={{ wesher_key }}
- WESHER_JOIN={% for host in ansible_play_hosts %}{{ hostvars[host].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %}
- name: Start wesher
systemd:
name: wesher.service
daemon_reload: true
state: started
enabled: true


@ -2,39 +2,20 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
version = "1.4.16"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,29 +1,23 @@
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
variable "nextcloud_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
variable "consul_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup" {
datacenters = ["dc1"]
priority = 90
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
%{ endif ~}
%{ if batch_node != null ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
attribute = "${node.unique.name}"
# Only node with a backup job so far
# Remove when backing up all nodes
value = "n2"
}
%{ endif ~}
group "backup" {
@ -31,221 +25,130 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
mode = "bridge"
port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = false
read_only = true
source = "all-volumes"
}
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service {
name = "backup"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape"
]
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
shutdown_delay = "5m"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = false
read_only = true
}
config {
image = "iamthefij/restic-scheduler:0.4.2"
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
"--push-gateway",
"http://pushgateway.nomad:9091",
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
"--snapshot",
"$${NOMAD_META_snapshot}",
%{ endif ~}
"$${NOMAD_TASK_DIR}/node-jobs.hcl",
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/restic-scheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/restic-scheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/restic-scheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
action "backupall" {
command = "/bin/restic-scheduler"
args = ["-once", "-backup", "all", "/local/node-jobs.hcl"]
}
action "backupallenv" {
command = "sh"
args = ["-c", "/bin/restic-scheduler -once -backup all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
env = {
RCLONE_CHECKERS = "2"
RCLONE_TRANSFERS = "2"
RCLONE_FTP_CONCURRENCY = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
}
template {
# Probably want to use database credentials that have access to dump all tables
data = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
{{ with nomadVar "secrets/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}}
{{ with nomadVar "secrets/postgres" }}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}}
{{ with secret "kv/data/nextcloud" }}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
{{ with secret "kv/data/backups" }}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
template {
data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
destination = "local/consul.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~}
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/${job_file}")}
# Current node is {{ env "node.unique.name" }}
# Consul backup below?
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${var.consul_backup}
{{ end -}}
{{ end -}}
%{ endfor ~}
# Dummy job to keep task healthy on node without any stateful services
job "Dummy" {
schedule = "@daily"
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
{{ range service "nextcloud" }}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") }}
${var.nextcloud_backup}
{{ end }}{{ end }}
EOF
destination = "local/node-jobs.hcl"
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 500
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
memory = 256
}
}
}
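# (Sketch, not part of the original diff.) The backup task above notes that it
# probably wants database credentials that can dump all tables rather than root.
# One option, following the mysql-bootstrap prestart pattern used elsewhere in
# this diff, is to grant a dedicated dump user; the 'backup' user name and the
# backup_password variable key are assumptions.
# template {
#   data = <<EOF
# {{ with nomadVar "secrets/mysql" }}
# CREATE USER IF NOT EXISTS 'backup'@'%' IDENTIFIED BY '{{ .backup_password }}';
# GRANT SELECT, LOCK TABLES, SHOW VIEW, EVENT, TRIGGER ON *.* TO 'backup'@'%';
# {{ end }}
# EOF
#   destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
# }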


@ -1,136 +1,27 @@
resource "nomad_job" "backup" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically (see the sketch after this resource)
for_each = toset(["n1", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
use_wesher = var.use_wesher
})
}
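# (Sketch, not part of the original diff.) One way to resolve the TODO above is
# to pull the client node names from Consul instead of hard-coding them. This
# assumes the hashicorp/consul provider is configured and that Nomad clients
# register a "nomad-client" service in Consul; both are assumptions here.
# data "consul_service" "nomad" {
#   name = "nomad-client"
# }
#
# locals {
#   nomad_client_nodes = toset([
#     for node in data.consul_service.nomad.service : node.node_name
#   ])
# }
#
# The resource above could then use: for_each = local.nomad_client_nodes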
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"])
nextcloud_backup = file("${path.module}/jobs/nextcloud.hcl")
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
resource "nomad_job" "backups" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
}
}
}
EOH
job_acl {
job_id = each.key
}
jobspec = file("${path.module}/backup.nomad")
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
resource "nomad_job" "backups-oneoff" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
jobspec = file("${path.module}/oneoff.nomad")
}


@ -1,54 +0,0 @@
job "authelia" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local authelia dir" {
pre_script {
on_backup = "mkdir -p /local/authelia"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "authelia"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/authelia/dump.sql"
}
}
backup {
paths = ["/local/authelia"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

backups/jobs/consul.hcl Normal file

@ -0,0 +1,45 @@
job "Consul" {
schedule = "* * * * *"
config {
# TODO: Backup to a meaningful location, this is just for testing (see the sketch after this job)
repo = "/local/repo"
# Read from secret file
passphrase = env("BACKUP_PASSPHRASE")
}
# Remove when using a proper backup destination
task "Create dir for repo" {
pre_script {
on_backup = "echo 'Backing up something'"
}
pre_script {
on_backup = "mkdir -p /local/repo"
}
}
task "Use consul snapshots" {
pre_script {
on_backup = "mkdir -p /local/consul"
}
pre_script {
on_backup = "consul snapshot save /local/consul/backup.snap"
}
post_script {
on_restore = "consul snapshot restore /local/consul/backup.snap"
}
}
backup {
paths = ["/local/consul"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}
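# (Sketch, not part of the original diff.) A non-test destination for the job
# above could mirror the s3/minio repos used by the other backup jobs in this
# diff; the bucket path is an assumption.
# config {
#   repo       = "s3://backups-minio.agnosticfront.thefij:8443/nomad/consul"
#   passphrase = env("BACKUP_PASSPHRASE")
#   options {
#     InsecureTls = true
#   }
# }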


@ -1,57 +0,0 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,54 +0,0 @@
job "grafana" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local grafana dir" {
pre_script {
on_backup = "mkdir -p /local/grafana"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "grafana"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/grafana/dump.sql"
}
}
backup {
paths = ["/local/grafana"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,64 +0,0 @@
job "lidarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/lidarr"]
backup_opts {
Exclude = [
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,53 +0,0 @@
job "lldap" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local backup dir" {
pre_script {
on_backup = "mkdir -p /local/lldap"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
database = "lldap"
no_tablespaces = true
dump_to = "/local/lldap/dump.sql"
}
}
backup {
paths = ["/local/lldap"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -0,0 +1,43 @@
job "Nextcloud" {
schedule = "* * * * *"
config {
# TODO: Backup to a meaningful location, this is just for testing
repo = "/local/repo"
# Read from secret file
passphrase = env("BACKUP_PASSPHRASE")
}
# Remove when using a proper backup destination
task "Create dir for repo" {
pre_script {
on_backup = "echo 'Backing up something'"
}
pre_script {
on_backup = "mkdir -p /local/repo"
}
}
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = env("MYSQL_DATABASE")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/dump.sql"
}
backup {
paths = ["/data/nextcloud"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}


@ -1,41 +0,0 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = [
# Configuration
"/data/nas-container/nzbget",
# Queued nzb files
"/data/media-write/Downloads/nzb",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,60 +0,0 @@
job "photoprism" {
schedule = "10 * * * *"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local photoprism dir" {
pre_script {
on_backup = "mkdir -p /local/photoprism"
}
}
task "Dump database" {
mysql "Dump database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "photoprism"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/photoprism/dump.sql"
}
}
backup {
paths = [
"/local/photoprism",
"/data/nas-container/photoprism",
]
backup_opts {
Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,64 +0,0 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,36 +0,0 @@
job "sabnzbd" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = ["/data/media-write/Downloads/sabnzbd"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}


@ -1,67 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

backups/oneoff.nomad Normal file

@ -0,0 +1,171 @@
variable "nextcloud_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
variable "consul_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup-oneoff-n2" {
datacenters = ["dc1"]
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
constraint {
attribute = "${node.unique.name}"
# Only node with a backup job so far
# Remove when backing up all nodes
value = "n2"
}
group "backup" {
network {
mode = "bridge"
port "metrics" {
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = true
source = "all-volumes"
}
service {
port = "metrics"
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = true
}
config {
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
"-once",
"-${NOMAD_META_task}",
"${NOMAD_META_job_name}",
# TODO: add restore arg here
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
}
template {
# Probably want to use database credentials that have access to dump all tables
data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
{{ with secret "kv/data/backups" }}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
template {
data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
destination = "local/consul.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }}
# Consul backup below?
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${var.consul_backup}
{{ end -}}
{{ range service "nextcloud" }}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") }}
${var.nextcloud_backup}
{{ end }}{{ end }}
EOF
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 256
}
}
}
}


@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

blocky/.terraform.lock.hcl generated Normal file

@ -0,0 +1,20 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

blocky/blocky.nomad Normal file

@ -0,0 +1,114 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" {
datacenters = ["dc1"]
type = "system"
priority = 100
update {
max_parallel = 1
auto_revert = true
}
group "blocky" {
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
host_network = "loopback"
to = "4000"
}
}
service {
name = "blocky-dns"
port = "dns"
}
service {
name = "blocky-api"
port = "api"
meta {
metrics_addr = "${NOMAD_ADDR_api}"
}
tags = [
"traefik.enable=true",
]
connect {
sidecar_service {
proxy {
local_service_port = 4000
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 4000
listener_port = "api"
}
}
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
}
task "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky"
ports = ["dns", "api"]
mount {
type = "bind"
target = "/app/config.yml"
source = "app/config.yml"
}
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
template {
data = var.config_data
destination = "app/config.yml"
}
}
}
}

blocky/blocky.tf Normal file

@ -0,0 +1,25 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals {
config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = "${var.base_hostname}",
}
)
}
resource "nomad_job" "blocky" {
hcl2 {
enabled = true
vars = {
"config_data" = "${local.config_data}",
}
}
jobspec = file("${path.module}/blocky.nomad")
}

blocky/config.yml Normal file

@ -0,0 +1,73 @@
upstream:
default:
- 1.1.1.1
- 1.0.0.1
quad9:
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://hosts-file.net/ad_servers.txt
smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
- https://perflyst.github.io/PiHoleBlocklist/regex.list
malware:
- https://mirror1.malwaredomains.com/files/justdomains
whiteLists:
# Move to Gitea when deployed internally
ads:
{{ keyOrDefault "blocky/whitelists/ads" "# None" | indent 6 }}
clientGroupsBlock:
default:
- ads
- malware
- smarttv
customDNS:
customTTL: 1h
mapping:
{{ with service "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
{{ keyOrDefault "global/base_hostname" "${base_hostname}" }}: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
# Other mappings
{{ keyOrDefault "blocky/mappings" "# None" | indent 4 }}
prometheus:
enable: true
redis:
address: {{ env "NOMAD_UPSTREAM_ADDR_redis" }}
# password: ""
# database: 0
connectionAttempts: 10
connectionCooldown: 3s
# queryLog:
# type: mysql
# target: db_user:db_password@tcp(db_host_or_ip:3306)/db_user?charset=utf8mb4&parseTime=True&loc=Local
# logRetentionDays: 7
port: 53
httpPort: 4000

bootstrap-values.yml Normal file

@ -0,0 +1,64 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap Vault values
hosts: vault_instances
gather_facts: false
vars_files:
- ./vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
# This fails on first run because `root_token` isn't found
# It fails after that too because the kv/ space has not been created yet either! Oh noes!
# Maybe move data bootstrapping to after the cluster is bootstrapped (see the sketch after this playbook)
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"
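# (Sketch, not part of the original playbook.) One way to address the ordering
# problem noted above is to make sure the kv/ secrets engine is mounted before
# this playbook writes to it, for example from Terraform with the Vault
# provider; the mount path and provider configuration are assumptions here.
# resource "vault_mount" "kv" {
#   path        = "kv"
#   type        = "kv"
#   options     = { version = "2" }
#   description = "KV v2 mount for bootstrap values"
# }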


@ -0,0 +1,4 @@
---
collections:
- name: community.hashi_vault
version: 3.0.0


@ -0,0 +1,4 @@
consul_values:
"blocky/whitelists/ads": |
- |
somedomain.com

core.tf Normal file

@ -0,0 +1,117 @@
module "databases" {
source = "./databases"
}
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
consul_address = var.consul_address
base_hostname = var.base_hostname
}
module "metrics" {
source = "./metrics"
consul_address = var.consul_address
}
module "loki" {
source = "./levant"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("./loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
}
resource "consul_config_entry" "loki_intent" {
name = "loki"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "promtail"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "syslogng-promtail"
Precedence = 9
Type = "consul"
},
]
})
}
resource "nomad_job" "syslog-ng" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/syslogng.nomad")
}
resource "consul_config_entry" "syslogng_promtail_intent" {
name = "syslogng-promtail"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "syslogng"
Precedence = 9
Type = "consul"
},
]
})
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1"
hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}


@ -1,204 +0,0 @@
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:4.38"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9999
service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
use_smtp = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
path "secrets/authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Enable oidc for nomad clients
module "nomad_oidc_client" {
source = "./oidc_client"
name = "nomad"
oidc_client_config = {
description = "Nomad"
authorization_policy = "two_factor"
redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
scopes = ["openid", "groups"]
}
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = module.nomad_oidc_client.client_id
oidc_client_secret = module.nomad_oidc_client.secret
bound_audiences = [module.nomad_oidc_client.client_id]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-admin\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}


@ -1,278 +0,0 @@
theme: auto
# jwt_secret: <file>
{{ with nomadVar "nomad/jobs" }}
default_redirection_url: https://authelia.{{ .base_hostname }}/
{{ end }}
## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
## disabled. This setting must be a method that is enabled.
## Options are totp, webauthn, mobile_push.
default_2fa_method: ""
server:
host: 0.0.0.0
port: {{ env "NOMAD_PORT_main" }}
disable_healthcheck: false
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
format: json
telemetry:
metrics:
enabled: false
# address: '0.0.0.0:{{ env "NOMAD_PORT_metrics" }}'
totp:
disable: false
issuer: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
digits: 6
## The TOTP algorithm to use.
## It is CRITICAL you read the documentation before changing this option:
## https://www.authelia.com/c/totp#algorithm
algorithm: sha1
webauthn:
disable: false
timeout: 60s
display_name: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
user_verification: preferred
duo_api:
disable: true
# hostname:
# integration_key:
# secret_key:
# enable_self_enrollment: false
authentication_backend:
disable_reset_password: false
## Password Reset Options.
password_reset:
## External reset password url that redirects the user to an external reset portal. This disables the internal reset
## functionality.
# TODO: not sure if this is needed, probably not?
custom_url: ""
refresh_interval: 5m
ldap:
implementation: custom
# stunnel url
url: ldap://127.0.0.1:389
timeout: 5s
# TODO: Maybe use stunnel for this
start_tls: false
base_dn: {{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
additional_users_dn: ou=people
additional_groups_dn: ou=groups
username_attribute: uid
group_name_attribute: cn
mail_attribute: mail
display_name_attribute: displayName
# To allow sign in both with username and email, one can use a filter like
# (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
users_filter: "(&({username_attribute}={input})(objectClass=person))"
# Only supported filter by lldap right now
groups_filter: (member={dn})
## The username and password of the admin user.
{{ with nomadVar "secrets/ldap" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }}
# password set using secrets file
# password: <secret>
password_policy:
standard:
enabled: false
min_length: 8
max_length: 0
require_uppercase: true
require_lowercase: true
require_number: true
require_special: true
zxcvbn:
enabled: false
min_score: 3
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 192.168.1.0/24
- 192.168.2.0/24
- 192.168.10.0/24
- name: VPN
networks: 192.168.5.0/24
rules:
## Allow favicons on internal network
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
resources:
- '^/apple-touch-icon-precomposed\.png$'
- '^/assets/safari-pinned-tab\.svg$'
- '^/apple-touch-icon-180x180\.png$'
- '^/apple-touch-icon\.png$'
- '^/favicon\.ico$'
networks:
- internal
policy: bypass
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks:
- internal
policy: one_factor
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
- domain:
# TODO: Drive these from Nomad variables
- 'secure.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
session:
## The name of the session cookie.
name: authelia_session
domain: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Stored in a secrets file
# secret: <in file>
expiration: 1h
inactivity: 5m
remember_me_duration: 1M
redis:
host: 127.0.0.1
port: 6379
# username: authelia
# password: authelia
# database_index: 0
maximum_active_connections: 8
minimum_idle_connections: 0
regulation:
max_retries: 3
find_time: 2m
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
# encryption_key: <in file>
##
## MySQL / MariaDB (Storage Provider)
##
mysql:
host: 127.0.0.1
port: 3306
{{ with nomadVar "nomad/jobs/authelia" }}
database: {{ .db_name }}
username: {{ .db_user }}
# password: <in_file>
{{- end }}
timeout: 5s
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a Webauthn registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: true
{{ with nomadVar "secrets/smtp" }}
smtp:
host: {{ .server }}
port: {{ .port }}
username: {{ .user }}
# password: <in file>
{{- end }}
{{ with nomadVar "nomad/jobs/authelia" }}
sender: "{{ .email_sender }}"
## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
subject: "[Authelia] {title}"
## This address is used during the startup check to verify the email configuration is correct.
## It's not important what it is except if your email server only allows local delivery.
startup_check_address: test@iamthefij.com
{{- end }}
identity_providers:
oidc:
# hmac_secret: <file>
# issuer_private_key: <file>
clients:
{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
{{ "-" | indent 6 }}
{{ with nomadVar .Path }}
{{- $im := .ItemsMap -}}
{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
{{- end -}}
{{ $im | toYAML | indent 8 }}
{{ end }}
{{ end }}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}


@ -1,369 +0,0 @@
job "blocky" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
auto_revert = true
min_healthy_time = "60s"
healthy_deadline = "5m"
}
group "blocky" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF; see the sketch after this job spec)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = "4000"
}
dns {
# Set explicit DNS servers because, by default, tasks would use this task as their DNS server
servers = [
"192.168.2.1",
]
}
}
service {
name = "blocky-dns"
provider = "nomad"
port = "dns"
}
service {
name = "blocky-api"
provider = "nomad"
port = "api"
tags = [
"prometheus.scrape",
"traefik.enable=true",
"traefik.http.routers.blocky-api.entryPoints=websecure",
]
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
task "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky:v0.24"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"]
}
action "refresh-lists" {
command = "/app/blocky"
args = ["lists", "refresh"]
}
action "healthcheck" {
command = "/app/blocky"
args = ["healthcheck"]
}
resources {
cpu = 50
memory = 75
memory_max = 150
}
template {
data = <<EOF
${file("${module_path}/config.yml")}
EOF
destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ range nomadServices }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") .Name -}}
{{ .Address }} {{ .Name }}.nomad
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .smarttv_regex.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .wemo.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/wemo.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .sonos.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/sonos.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 20
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[dns_server]
# Dummy server to keep stunnel running if no mysql is present
accept = 8053
connect = 127.0.0.1:53
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/bin/bash",
"-c",
"/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" }}
password={{ .mysql_root_password }}
{{ end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/blocky" }}{{ if .db_name -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
{{ with nomadService "grafana" }}{{ with nomadVar "nomad/jobs" -}}
-- Grant grafana read_only user access to db
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
{{ end }}{{ end -}}
{{ else -}}
SELECT 'NOOP';
{{ end -}}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
}
}
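# (Sketch, not part of the original diff.) Per the TODO at the top of this
# group, the replica count could be passed in from Terraform rather than
# hard-coded; "server_count" and "var.server_names" are hypothetical names.
# resource "nomad_job" "blocky" {
#   jobspec = templatefile("${path.module}/blocky.nomad", {
#     use_wesher   = var.use_wesher,
#     module_path  = path.module,
#     server_count = length(var.server_names),
#   })
# }
# The job template above could then set: count = ${server_count}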


@ -1,88 +0,0 @@
resource "nomad_job" "blocky" {
jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher,
module_path = path.module,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
}
resource "nomad_variable" "blocky_lists_terraform" {
path = "blocky_lists/terraform"
items = {
smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
wemo = file("${path.module}/list-wemo.txt")
sonos = file("${path.module}/list-sonos.txt")
}
}
resource "nomad_acl_policy" "blocky_lists" {
name = "blocky-lists"
description = "Give access Blocky lists"
rules_hcl = <<EOH
namespace "default" {
variables {
path "blocky_lists/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "blocky"
}
}


@ -1,142 +0,0 @@
ports:
dns: 53
http: 4000
# I must have ip v6 blocked or something
connectIPVersion: v4
bootstrapDns:
- upstream: 1.1.1.1
- upstream: 1.0.0.1
- upstream: 9.9.9.9
- upstream: 149.112.112.112
upstreams:
init:
strategy: fast
groups:
default:
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one
# cloudflare:
# - 1.1.1.1
# - 1.0.0.1
# - 2606:4700:4700::1111
# - 2606:4700:4700::1001
# - https://one.one.one.one/dns-query
# - tcp-tls:one.one.one.one
# quad9:
# - 9.9.9.9
# - 149.112.112.112
# - 2620:fe::fe
# - 2620:fe::9
# - https://dns.quad9.net/dns-query
# - tcp-tls:dns.quad9.net
# quad9-secured:
# - 9.9.9.11
# - 149.112.112.11
# - 2620:fe::11
# - 2620:fe::fe:11
# - https://dns11.quad9.net/dns-query
# - tcp-tls:dns11.quad9.net
# quad9-unsecured:
# - 9.9.9.10
# - 149.112.112.10
# - 2620:fe::10
# - 2620:fe::fe:10
# - https://dns10.quad9.net/dns-query
# - tcp-tls:dns10.quad9.net
conditional:
fallbackUpstream: false
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
hostsFile:
sources:
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s
loading:
refreshPeriod: 30s
clientLookup:
upstream: 192.168.2.1
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt
iot:
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
- {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt
- {{ env "NOMAD_TASK_DIR" }}/wemo.txt
- {{ env "NOMAD_TASK_DIR" }}/sonos.txt
antisocial:
- |
facebook.com
instagram.com
reddit.com
twitter.com
youtube.com
custom:
- {{ env "NOMAD_TASK_DIR" }}/block
whiteLists:
custom:
- {{ env "NOMAD_TASK_DIR" }}/allow
clientGroupsBlock:
default:
- ads
- custom
192.168.3.1/24:
- ads
- iot
- custom
customDNS:
customTTL: 1h
mapping:
{{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings.Value | indent 4 }}{{ end }}
# Catch all at top domain to traefik
{{ with nomadService "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
prometheus:
enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
redis:
address: 127.0.0.1:6379
# password: ""
# database: 0
connectionAttempts: 10
connectionCooldown: 3s
{{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}}
queryLog:
type: mysql
target: {{ .db_user }}:{{ .db_pass }}@tcp(127.0.0.1:3306)/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
logRetentionDays: 14
{{ end -}}
{{ end -}}


@ -1,13 +0,0 @@
# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
/(^|\.)samsungrm\.net$/
# /(^|\.)samsungotn\.net$/ # prevents updates
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection


@ -1,2 +0,0 @@
# Block Sonos devices from phoning home and allowing remote access
/(^|\.)sonos\.com$/


@ -1,8 +0,0 @@
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com


@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -1,48 +0,0 @@
job "ddclient" {
datacenters = ["dc1"]
type = "service"
group "ddclient" {
task "ddclient" {
driver = "docker"
config {
image = "ghcr.io/linuxserver/ddclient:v3.10.0-ls104"
mount {
type = "bind"
source = "secrets/ddclient.conf"
target = "/config/ddclient.conf"
}
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/ddclient" -}}
daemon=900
ssl=yes
use=web
web=api.myip.com
protocol=cloudflare,
zone={{ .zone }},
ttl=1,
login=token,
password={{ .domain_ddclient }}
{{ .domain }}
{{- end }}
EOH
destination = "secrets/ddclient.conf"
change_mode = "restart"
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
}
}
}


@ -1,143 +0,0 @@
job "exporters" {
datacenters = ["dc1"]
type = "service"
priority = 55
constraint {
distinct_hosts = true
}
group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network {
mode = "bridge"
port "promtail" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9080
}
}
service {
name = "promtail"
provider = "nomad"
port = "promtail"
meta {
nomad_dc = "$${NOMAD_DC}"
nomad_node_name = "$${node.unique.name}"
}
tags = [
"prometheus.scrape",
]
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:3.3.0"
args = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"]
ports = ["promtail"]
# Bind mount host machine-id and log directories
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
mount {
type = "bind"
source = "/var/log/journal/"
target = "/var/log/journal/"
readonly = true
}
mount {
type = "bind"
source = "/run/log/journal/"
target = "/run/log/journal/"
readonly = true
}
# mount {
# type = "bind"
# source = "/var/log/audit"
# target = "/var/log/audit"
# readonly = true
# }
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
- url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
{{- end }}
scrape_configs:
- job_name: journal
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: systemd-journal
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: unit
- source_labels: ['__journal__hostname']
target_label: hostname
- source_labels: ['__journal__transport']
target_label: journal_transport
# Docker log labels
- source_labels: ['__journal_syslog_identifier']
target_label: syslog_identifier
- source_labels: ['__journal_image_name']
target_label: docker_image_name
- source_labels: ['__journal_container_name']
target_label: docker_container_name
- source_labels: ['__journal_container_id']
target_label: docker_container_id
- source_labels: ['__journal_com_docker_compose_project']
target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service
- source_labels: ['__journal_com_hashicorp_nomad_alloc_id']
target_label: nomad_alloc_id
- source_labels: ['__journal_com_hashicorp_nomad_job_id']
target_label: nomad_job_id
- source_labels: ['__journal_com_hashicorp_nomad_job_name']
target_label: nomad_job_name
- source_labels: ['__journal_com_hashicorp_nomad_node_name']
target_label: nomad_node_name
- source_labels: ['__journal_com_hashicorp_nomad_group_name']
target_label: nomad_group_name
- source_labels: ['__journal_com_hashicorp_nomad_task_name']
target_label: nomad_task_name
EOF
destination = "$${NOMAD_TASK_DIR}/promtail.yml"
}
resources {
cpu = 50
memory = 100
}
}
}
}

View File

@ -1,5 +0,0 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
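A minimal sketch, not part of the repo, of how the promtail count mentioned in the exporters jobspec TODO could be fed from Terraform instead of being hard-coded; the `client_count` variable and the `count = ${client_count}` placeholder it would replace in the jobspec are assumptions for illustration:
variable "client_count" {
  type        = number
  description = "Number of Nomad client hosts that should run promtail"
  default     = 2
}
# Hypothetical alternative to the resource above: pass the count into the template
# and use `count = ${client_count}` in the promtail group of exporters.nomad.
resource "nomad_job" "exporters_with_count" {
  jobspec = templatefile("${path.module}/exporters.nomad", {
    use_wesher   = var.use_wesher,
    client_count = var.client_count,
  })
}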

View File

@ -1,300 +0,0 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
]
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
{{ if .db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.* to '{{ .db_user }}'@'%';
-- Create Read Only user
CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:10.0.10"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"]
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
}
template {
data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
{{ if .db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }}
{{ end -}}
SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
{{ with nomadVar "secrets/authelia/grafana" -}}
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
resources {
cpu = 100
memory = 200
}
}
task "grafana-reprovisioner" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/startup.sh"]
}
resources {
cpu = 50
memory = 50
}
action "reloadnow" {
command = "/local/reload_config.sh"
}
env = {
LOG_FILE = "/var/log/grafana_reloader.log"
}
template {
data = <<EOF
#! /bin/sh
apk add curl
touch "$LOG_FILE"
exec tail -f "$LOG_FILE"
EOF
perms = "777"
destination = "$${NOMAD_TASK_DIR}/startup.sh"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
{{ end -}}
EOF
env = true
destination = "$${NOMAD_SECRETS_DIR}/conf.env"
}
template {
data = <<EOF
#! /bin/sh
exec > "$LOG_FILE"
exec 2>&1
GRAFANA_URL=http://127.0.0.1:3000
echo "Reload dashboards"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/dashboards/reload
echo "Reload datasources"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/datasources/reload
echo "Reload plugins"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/plugins/reload
echo "Reload notifications"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/notifications/reload
echo "Reload access-control"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/access-control/reload
echo "Reload alerting"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/alerting/reload
EOF
change_mode = "noop"
perms = "777"
destination = "$${NOMAD_TASK_DIR}/reload_config.sh"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
destination = "$${NOMAD_ALLOC_DIR}/config/${config_file}"
perms = 777
# Set owner to grafana uid
# uid = 472
# Change template delimiters for dashboard files, which are JSON and contain double curly braces and square brackets
%{ if endswith(config_file, ".json") ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
change_mode = "script"
change_script {
command = "/local/reload_config.sh"
}
}
%{ endfor }
}
task "grafana-image-renderer" {
driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]
}
env = {
"RENDERING_MODE" = "clustered"
"RENDERING_CLUSTERING_MODE" = "browser"
"RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5
"RENDERING_CLUSTERING_TIMEOUT" = 30
}
}
}
}

View File

@ -1,117 +0,0 @@
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# resource "nomad_variable" "grafana_config" {
# for_each = fileset("${path.module}/grafana", "**")
#
# path = "nomad/jobs/grafana/${replace(each.key, ".", "_")}"
# items = {
# path = "${each.key}"
# value = file("${path.module}/grafana/${each.key}")
# left_delimiter = endswith(each.key, ".json") ? "<<<<" : "{{"
# right_delimiter = endswith(each.key, ".json") ? ">>>>" : "}}"
# }
# }

View File

@ -1,882 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "Query report for blocky (MySQL)",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 14980,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 0
},
"id": 14,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.1.2",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.response_type, t.request_Ts as time, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.response_type\n order by t.request_Ts",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by response type",
"transformations": [],
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 0
},
"id": 16,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.request_ts AS time,\n case when t.reason like 'BLOCKED%' then SUBSTRING_INDEX(SUBSTRING_INDEX(t.reason,'(',-1), ')',1) else '' end AS metric,\n count(t.reason) AS cnt\nFROM log_entries t\nWHERE t.response_type ='BLOCKED'\n AND $__timeFilter(t.request_Ts)\n AND t.client_name in ($client_name)\n AND (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 2\nORDER BY time",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Blocked by Blacklist",
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_1",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 13,
"links": [],
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": true
},
"showUnfilled": true,
"text": {}
},
"pluginVersion": "9.2.4",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "f",
"rawQuery": true,
"rawSql": "SELECT t.request_Ts as time, t.client_name as metric, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.client_name\n order by 3 desc",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by client",
"transformations": [],
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Top 20 effective top level domain plus one more label",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 11,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.effective_tldp as metric, count(*) as value from log_entries t \nWHERE $__timeFilter(t.request_Ts) \n and t.response_type in ($response_type) \n and t.client_name in ($client_name) \n and (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.effective_tldp order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 effective TLD+1",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 8,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.question_name as metric, count(*) as value from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.question_name order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 queried domains",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 16
},
"id": 12,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n $__timeGroupAlias(t.request_Ts, '30m'),\n t.client_name as metric,\n count(*) as c\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 1,2\nORDER BY 1",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Queries number per client (30m)",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "dtdurationms"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 23
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n UNIX_TIMESTAMP(t.request_Ts) as time,\n t.duration_ms\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nORDER BY request_ts",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query duration",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Last 100 queries, newest on top",
"fieldConfig": {
"defaults": {
"custom": {
"displayMode": "auto",
"filterable": false,
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "time"
},
"properties": [
{
"id": "unit",
"value": "dateTimeAsIsoNoDateIfToday"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 31
},
"id": 4,
"options": {
"footer": {
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT UNIX_TIMESTAMP(t.request_Ts) as \"time\", \n t.client_ip as \"client IP\", \n t.client_name as \"client name\", \n t.duration_ms as \"duration in ms\", \n t.response_type as \"response type\", \n t.question_type as \"question type\", \n t.question_name as \"question name\", \n t.effective_tldp as \"effective TLD+1\", \n t.answer as \"answer\" from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n order by t.request_Ts desc limit 100",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Last queries",
"type": "table"
}
],
"refresh": "",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": "",
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct client_name from log_entries",
"hide": 0,
"includeAll": true,
"label": "Client name",
"multi": true,
"name": "client_name",
"options": [],
"query": "select distinct client_name from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct response_type from log_entries",
"hide": 0,
"includeAll": true,
"label": "Response type",
"multi": true,
"name": "response_type",
"options": [],
"query": "select distinct response_type from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"hide": 0,
"label": "Domain (contains)",
"name": "question",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Blocky query",
"uid": "AVmWSVWgz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large

View File

@ -1,411 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"fixedColor": "rgb(31, 120, 193)",
"mode": "fixed"
},
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#d44a3a",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 0
},
{
"color": "#299c46",
"value": 1
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 0,
"y": 0
},
"id": 2,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "none",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)",
"format": "time_series",
"instant": true,
"intervalFactor": 2,
"refId": "A"
}
],
"title": "Total Monitors",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"decimals": 0,
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#299c46",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 1
},
{
"color": "#d44a3a",
"value": 2
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 4,
"y": 0
},
"id": 6,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)-count(minitor_monitor_up_count>=1)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"refId": "A"
}
],
"title": "Total Down Services",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 0,
"spanNulls": 900000
},
"mappings": [
{
"options": {
"0": {
"index": 1,
"text": "Down"
},
"1": {
"index": 0,
"text": "Up"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "red",
"value": 0
},
{
"color": "green",
"value": 1
}
]
},
"unit": "bool_on_off"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 14,
"x": 8,
"y": 0
},
"id": 4,
"links": [],
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "never",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"editorMode": "code",
"exemplar": false,
"expr": "sum(minitor_monitor_up_count) by (monitor)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"range": true,
"refId": "A"
}
],
"title": "Service Status",
"type": "state-timeline"
},
{
"columns": [],
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fontSize": "100%",
"gridPos": {
"h": 11,
"w": 22,
"x": 0,
"y": 7
},
"id": 8,
"links": [],
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"styles": [
{
"alias": "Time",
"align": "auto",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "Status",
"align": "auto",
"colorMode": "cell",
"colors": [
"#F2495C",
"#F2495C",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 0,
"link": false,
"mappingType": 1,
"pattern": "Value",
"thresholds": [
"0",
"1"
],
"type": "string",
"unit": "short",
"valueMaps": [
{
"text": "Ok",
"value": "1"
},
{
"text": "Not Ok",
"value": "0"
}
]
}
],
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "minitor_monitor_up_count",
"format": "time_series",
"instant": true,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"refId": "A"
}
],
"title": "Monitor status",
"transform": "timeseries_to_rows",
"type": "table-old"
}
],
"refresh": "30s",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Minitor Monitor",
"uid": "qoE5Hrxiz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
apiVersion: 1
providers:
- name: default
folder: 'Provisioned'
type: file
disableDeletion: false
options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default

View File

@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox

View File

@ -1,16 +0,0 @@
---
apiVersion: 1
datasources:
- name: Blocky logs
url: 127.0.0.1:3306
# TODO: Looking for an ACL-friendly way to expose this, since it's a variable in the blocky setup
database: blocky
type: mysql
isDefault: false
version: 1
{{ with nomadVar "nomad/jobs/grafana" -}}
user: {{ .db_user_ro }}
secureJsonData:
password: {{ .db_pass_ro }}
{{- end }}

View File

@ -1,96 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f .lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}

View File

@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}

View File

@ -1,41 +0,0 @@
auth_enabled: false
server:
http_listen_port: 3100
common:
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
replication_factor: 1
path_prefix: /tmp/loki
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 168h
table_manager:
retention_deletes_enabled: true
retention_period: 168h

View File

@ -1,24 +0,0 @@
module "loki" {
source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}

View File

@ -1,27 +0,0 @@
module "blocky" {
source = "./blocky"
use_wesher = var.use_wesher
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
}
resource "nomad_job" "nomad-client-stalker" {
# The stalker job registers a service on each client so the Nomad service registry can be used to identify client hosts
jobspec = file("${path.module}/nomad-client-stalker.nomad")
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}

View File

@ -1,32 +0,0 @@
job "nomad-client-stalker" {
type = "system"
group "main" {
network {
mode = "host"
port "main" {}
}
service {
name = "nomad-client-stalker"
provider = "nomad"
port = "main"
}
task "main" {
driver = "docker"
config {
image = "busybox"
args = ["tail", "-f", "/dev/null"]
}
resources {
cpu = 10
memory = 15
memory_max = 30
}
}
}
}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}

View File

@ -1,50 +0,0 @@
resource "random_password" "oidc_client_id" {
length = 72
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}

View File

@ -1,11 +0,0 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}

View File

@ -1,25 +0,0 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}
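For reference, a hypothetical invocation of this module; the name, group/task values, and redirect URI are placeholders, and the pattern mirrors the grafana_oidc usage earlier in this diff:
module "example_oidc" {
  source = "./oidc_client"
  name   = "example"
  oidc_client_config = {
    description   = "Example app"
    redirect_uris = ["https://example.thefij.rocks/oauth/callback"]
    scopes        = ["openid", "email", "profile"]
  }
  job_acl = {
    job_id = "example"
    group  = "example"
    task   = "example"
  }
}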

View File

@ -1,169 +0,0 @@
job "prometheus" {
datacenters = ["dc1"]
group "prometheus" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9090
}
port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 9091
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "prometheus"
provider = "nomad"
port = "web"
// TODO: Remove traefik tags
tags = [
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
service {
name = "pushgateway"
provider = "nomad"
port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.43.0"
ports = ["web"]
args = [
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
]
}
template {
data = <<EOF
---
global:
scrape_interval: 30s
evaluation_interval: 3s
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 127.0.0.1:9090
- job_name: "pushgateway"
honor_labels: true
static_configs:
- targets:
- 127.0.0.1:9091
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
nomad_sd_configs:
# TODO: Use NOMAD_SECRETS_DIR/api.sock and workload identity when
# workload ACLs can be set using Terraform (see the policy sketch after the
# prometheus Terraform resource below)
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_service]
regex: nomad-client-stalker
action: keep
- source_labels: [__meta_nomad_address]
replacement: "$1:4646"
target_label: __address__
- job_name: "nomad_services"
metrics_path: "/metrics"
nomad_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_tags]
regex: .*(prometheus.scrape).*
action: keep
- source_labels: [__meta_nomad_service_address,__meta_nomad_service_port]
separator: ":"
target_label: __address__
- source_labels: [__meta_nomad_service]
target_label: nomad_service
- source_labels: [__meta_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_nomad_node_id]
target_label: nomad_node_id
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml"
}
resources {
cpu = 100
memory = 300
}
}
task "pushgateway" {
driver = "docker"
config {
image = "prom/pushgateway"
ports = ["pushgateway"]
args = [
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence",
]
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,7 +0,0 @@
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
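A sketch, not present in the repo, of what the workload ACL mentioned in the prometheus.yml TODO might look like once it is managed from Terraform; it mirrors the traefik-query-jobs policy that appears later in this diff, and the node read rule is an assumption about what Nomad service discovery over the task API socket would need:
# Hypothetical policy: would let the prometheus task use its workload identity
# socket for service discovery instead of the node HTTP address.
resource "nomad_acl_policy" "prometheus_query_jobs" {
  name        = "prometheus-query-jobs"
  description = "Allow prometheus to list jobs and read nodes for service discovery"
  rules_hcl   = <<EOH
namespace "default" {
  capabilities = ["list-jobs", "read-job"]
}
node {
  policy = "read"
}
EOH
  job_acl {
    job_id = "prometheus"
  }
}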

View File

@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0"
hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9",
]
}

View File

@ -1,333 +0,0 @@
job "traefik" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
attribute = "${node.class}"
value = "ingress"
}
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
canary = 1
auto_promote = false
auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
}
group "traefik" {
count = 2
network {
port "web" {
static = 80
}
port "websecure" {
static = 443
}
port "syslog" {
static = 514
}
port "gitssh" {
static = 2222
}
port "metrics" {}
dns {
servers = [
"192.168.2.101",
"192.168.2.102",
"192.168.2.30",
]
}
}
ephemeral_disk {
migrate = true
sticky = true
}
task "traefik" {
driver = "docker"
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
}
config {
image = "traefik:3.0"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host"
mount {
type = "bind"
target = "/etc/traefik"
source = "local/config"
}
mount {
type = "bind"
target = "/etc/traefik/usersfile"
source = "secrets/usersfile"
}
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
}
template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<"
right_delimiter = ">>"
data = <<EOH
[log]
level = "DEBUG"
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web.http]
[entryPoints.web.http.redirections]
[entryPoints.web.http.redirections.entrypoint]
to = "websecure"
scheme = "https"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure.http.tls]
[entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>"
[entryPoints.syslogtcp]
address = ":514"
[entryPoints.syslogudp]
address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api]
dashboard = true
[ping]
entrypoint = "web"
[metrics]
[metrics.prometheus]
entrypoint = "metrics"
# manualRouting = true
[providers.file]
directory = "/etc/traefik/conf"
watch = true
[providers.nomad]
exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint]
address = "unix:///secrets/api.sock"
EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
}
template {
data = <<EOH
[http]
[http.routers]
[http.routers.nomad]
entryPoints = ["websecure"]
service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.routers.{{ .name }}]
entryPoints = ["websecure"]
service = "{{ .name }}"
rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}"
{{ $name := .name -}}
{{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }}
{{- end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services]
[http.services.nomad]
[http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.services.{{ .name }}]
[http.services.{{ .name }}.loadBalancer]
[[http.services.{{ .name }}.loadBalancer.servers]]
url = "{{ .url }}"
{{- end }}{{ end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOH
{{ with nomadService "syslogng" -}}
[tcp.routers]
[tcp.routers.syslogtcp]
entryPoints = ["syslogtcp"]
service = "syslogngtcp"
rule = "HostSNI(`*`)"
[tcp.services]
[tcp.services.syslogngtcp]
[tcp.services.syslogngtcp.loadBalancer]
{{ range . -}}
[[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
{{ with nomadService "syslogng" -}}
[udp.routers]
[udp.routers.syslogudp]
entryPoints = ["syslogudp"]
service = "syslogngudp"
[udp.services]
[udp.services.syslogngudp]
[udp.services.syslogngudp.loadBalancer]
{{ range . -}}
[[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop"
}
template {
data = <<EOH
[http.middlewares]
{{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth]
usersFile = "/etc/traefik/usersfile"
{{- end }}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml"
change_mode = "noop"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
{{ .usersfile }}
{{- end }}
EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile"
change_mode = "noop"
}
resources {
cpu = 100
memory = 150
}
}
}
}

View File

@ -1,90 +0,0 @@
resource "nomad_job" "traefik" {
jobspec = file("${path.module}/traefik.nomad")
}
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}
resource "nomad_variable" "traefik_external_jellyfin" {
path = "traefik_external/jellyfin"
items = {
name = "jellyfin"
subdomain = "jellyfin",
url = "http://agnosticfront.thefij:8096"
}
}
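A hypothetical additional entry, not in the repo, showing the optional path_prefix field read by the route-hashi.toml template above (note that the corresponding stripPrefix middleware block is commented out in that template); the name, subdomain, and URL are placeholders:
resource "nomad_variable" "traefik_external_example" {
  path = "traefik_external/example"
  items = {
    name        = "example"
    subdomain   = "example"
    url         = "http://192.168.3.99:8080"
    path_prefix = "/app"
  }
}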

View File

@ -1,11 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

70
databases/adminer.nomad Normal file
View File

@ -0,0 +1,70 @@
job "adminer" {
datacenters = ["dc1"]
type = "service"
group "adminer" {
count = 1
# Some affinity to stateful hosts?
network {
mode = "bridge"
port "adminer" {
host_network = "loopback"
to = 8080
}
}
service {
name = "adminer"
port = "adminer"
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
# TODO: how do I get these to not bind to the host eth0 address
local_bind_port = 4040
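# One possible answer to the TODO above (an assumption, not verified against the
# Nomad version in use): upstreams support a local_bind_address field, which
# would keep the proxy listener on loopback only, e.g.:
# local_bind_address = "127.0.0.1"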
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 25
}
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.adminer.entryPoints=websecure",
]
}
task "adminer" {
driver = "docker"
config {
image = "adminer"
ports = ["adminer"]
}
env = {
"ADMINER_DEFAULT_SERVER" = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,253 +0,0 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
priority = 80
update {
auto_revert = true
}
group "lldap" {
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "tls" {}
}
service {
name = "lldap"
provider = "nomad"
port = "ldap"
}
service {
name = "lldap-tls"
provider = "nomad"
port = "tls"
}
service {
name = "ldap-admin"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
config {
image = "ghcr.io/lldap/lldap:v0.5"
ports = ["ldap", "web"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"]
}
env = {
"LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
}
template {
data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}}
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options]
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true
{{ end -}}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}"
port = {{ .port }}
tls_required = {{ .tls.Value | toLower }}
user = "{{ .user }}"
{{ end -}}
EOH
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart"
}
resources {
cpu = 10
memory = 200
memory_max = 200
}
}
task "bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/lldap" -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`
CHARACTER SET = 'utf8mb4'
COLLATE = 'utf8mb4_unicode_ci';
DROP USER IF EXISTS '{{ .db_user }}'@'%';
CREATE USER '{{ .db_user }}'@'%'
IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.*
TO '{{ .db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[ldap_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
}
}


@ -1,123 +0,0 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are servics dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}

Some files were not shown because too many files have changed in this diff.