Compare commits


1 Commit
main ... immich

Author SHA1 Message Date
IamTheFij c8e03e4c0d WIP: Deploy immich with postgres 2022-09-28 12:30:25 -07:00
178 changed files with 8750 additions and 15204 deletions

51
.gitignore vendored

@ -1,53 +1,8 @@
# ---> Terraform
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# ---> Ansible
*.retry
ansible_galaxy/ansible_collections/
ansible_galaxy/roles/
# Repo specific
roles/
venv/
ca/
# Non-public bootstrap values
vault-keys.json
nomad_bootstrap.json
ca/
collections/ansible_collections/
consul_values.yml
vault_hashi_vault_values.yml
vault_*.yml
ansible_playbooks/vars/nomad_vars.yml


@ -1,34 +0,0 @@
---
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.76.0
hooks:
- id: terraform_fmt
- id: terraform_validate
args:
- --tf-init-args=-lockfile=readonly
- id: terraform_tflint
args:
- --args=--config=__GIT_WORKING_DIR__/.tflint.hcl
- id: terraform_tfsec
# - id: terraform_providers_lock
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: check-added-large-files
- id: check-merge-conflict
- id: end-of-file-fixer
exclude: "^ansible_playbooks/vars/nomad_vars.sample.yml$"
- id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets
rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets-baseline']
- repo: local
hooks:
- id: variable-sample
name: generate variable sample file
language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file]


@ -1,191 +0,0 @@
{
"version": "1.4.0",
"plugins_used": [
{
"name": "ArtifactoryDetector"
},
{
"name": "AWSKeyDetector"
},
{
"name": "AzureStorageKeyDetector"
},
{
"name": "Base64HighEntropyString",
"limit": 4.5
},
{
"name": "BasicAuthDetector"
},
{
"name": "CloudantDetector"
},
{
"name": "DiscordBotTokenDetector"
},
{
"name": "GitHubTokenDetector"
},
{
"name": "HexHighEntropyString",
"limit": 3.0
},
{
"name": "IbmCloudIamDetector"
},
{
"name": "IbmCosHmacDetector"
},
{
"name": "JwtTokenDetector"
},
{
"name": "KeywordDetector",
"keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
{
"name": "NpmDetector"
},
{
"name": "PrivateKeyDetector"
},
{
"name": "SendGridDetector"
},
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
{
"name": "SquareOAuthDetector"
},
{
"name": "StripeDetector"
},
{
"name": "TwilioKeyDetector"
}
],
"filters_used": [
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_baseline_file",
"filename": ".secrets-baseline"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
},
{
"path": "detect_secrets.filters.heuristic.is_indirect_reference"
},
{
"path": "detect_secrets.filters.heuristic.is_likely_id_string"
},
{
"path": "detect_secrets.filters.heuristic.is_lock_file"
},
{
"path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
},
{
"path": "detect_secrets.filters.heuristic.is_potential_uuid"
},
{
"path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
},
{
"path": "detect_secrets.filters.heuristic.is_sequential_string"
},
{
"path": "detect_secrets.filters.heuristic.is_swagger_file"
},
{
"path": "detect_secrets.filters.heuristic.is_templated_secret"
},
{
"path": "detect_secrets.filters.regex.should_exclude_secret",
"pattern": [
"(\\${.*}|from_env|fake|!secret|VALUE)"
]
}
],
"results": {
"core/authelia.yml": [
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04",
"is_verified": false,
"line_number": 54,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false,
"line_number": 189,
"is_secret": false
}
],
"core/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252,
"is_secret": false
}
]
},
"generated_at": "2024-02-20T18:04:29Z"
}


@ -1,40 +1,78 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.2.0"
provider "registry.terraform.io/hashicorp/consul" {
version = "2.14.0"
hashes = [
"h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
"zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
"zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
"zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
"zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
"zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
"zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
"zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
"zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
"zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
"zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
"h1:lJWOdlqevg6FQLFlfM3tGOsy9yPrjm9/vqkfzVrqT/A=",
"h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
"zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
"zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
"zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf",
"zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3",
"zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc",
"zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0",
"zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0",
"zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090",
"zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0",
"zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f",
"zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.3.1"
hashes = [
"h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
"h1:i7EC2IF0KParI+JPA5ZtXJrAn3bAntW5gEMLvOXwpW4=",
"zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
"zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
"zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
"zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282",
"zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce",
"zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb",
"zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929",
"zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891",
"zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861",
"zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c",
"zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941",
]
}


@ -1,7 +0,0 @@
rule "terraform_required_version" {
enabled = false
}
rule "terraform_required_providers" {
enabled = false
}

147
Makefile

@ -1,76 +1,84 @@
SLEEP_FOR ?= 10
VENV ?= venv
SERVER ?= "192.168.2.41"
SSH_USER = iamthefij
SSH_KEY = ~/.ssh/id_ed25519
.PHONY: sleep
sleep:
sleep $(SLEEP_FOR)
.PHONY: rm-nomad
rm-nomad:
hashi-up nomad uninstall \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: default
default: check
.PHONY: nomad-up
nomad-up:
hashi-up nomad install \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--server --client
hashi-up nomad start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: all
all: cluster bootstrap-values apply
.PHONY: rm-consul
rm-consul:
hashi-up consul uninstall \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: consul-up
consul-up:
hashi-up consul install \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--advertise-addr $(SERVER) \
--client-addr 0.0.0.0 \
--http-addr 0.0.0.0 \
--connect \
--server
hashi-up consul start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: cluster
cluster: ansible-cluster
# Ensures virtualenv is present
$(VENV):
python3 -m venv $(VENV)
$(VENV)/bin/pip install -r requirements.txt
venv/bin/ansible:
python3 -m venv venv
./venv/bin/pip install ansible python-consul hvac
# Installs pre-commit hooks
.PHONY: install-hooks
install-hooks: $(VENV)
$(VENV)/bin/pre-commit install --install-hooks
.PHONY: galaxy
galaxy: venv/bin/ansible
./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
./venv/bin/ansible-galaxy collection install -r collections/requirements.yml
# Runs all pre-commit checks against all files
.PHONY: check
check: $(VENV)
$(VENV)/bin/pre-commit run --all-files
# Creates a new secrets baseline
.secrets-baseline: $(VENV) Makefile
$(VENV)/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret|VALUE)' > .secrets-baseline
# Audits secrets against baseline
.PHONY: secrets-audit
secrets-audit: $(VENV) .secrets-baseline
$(VENV)/bin/detect-secrets audit .secrets-baseline
# Updates secrets baseline
.PHONY: secrets-update
secrets-update: $(VENV) .secrets-baseline
$(VENV)/bin/detect-secrets scan --baseline .secrets-baseline
.PHONY: ansible_galaxy
ansible_galaxy: ansible_galaxy/ansible_collections ansible_galaxy/roles
ansible_galaxy/ansible_collections: $(VENV) ./ansible_galaxy/requirements.yml
$(VENV)/bin/ansible-galaxy collection install -p ./ansible_galaxy -r ./ansible_galaxy/requirements.yml
ansible_galaxy/roles: $(VENV) ./ansible_galaxy/requirements.yml
$(VENV)/bin/ansible-galaxy install -p ./ansible_galaxy/roles -r ./ansible_galaxy/requirements.yml
.PHONY: ansible-cluster
ansible-cluster: $(VENV) ansible_galaxy
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -K -vv \
ansible-cluster: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
./ansible_playbooks/setup-cluster.yml
-i ansible_hosts.yml -M ./roles ./setup-cluster.yml
.PHONY: bootstrap-values
bootstrap-values: $(VENV)
env NOMAD_ADDR=http://192.168.2.101:4646 \
NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \
$(VENV)/bin/python ./scripts/nomad_vars.py
bootstrap-values: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -vv \
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml
.PHONY: recover-nomad
recover-nomad: $(VENV)
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/recover-nomad.yaml
.PHONY: stop-cluster
stop-cluster: $(VENV)
$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/stop-cluster.yml
.PHONY: unseal-vault
unseal-vault: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
-e "@vault-keys.json" -i ansible_hosts.yml -M ./roles ./unseal-vault.yml
.PHONY: init
init:
@ -80,26 +88,15 @@ init:
plan:
@terraform plan \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: apply
apply:
@terraform apply \
-auto-approve \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(shell jq -r .root_token vault-keys.json)"
.PHONY: refresh
refresh:
@terraform refresh \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: destroy
destroy:
@terraform destroy \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: clean
clean:
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \
./ansible_playbooks/clear-data.yml
find -name "*.tfstate" -exec rm '{}' \;
rm -f ./vault-keys.json ./nomad_bootstrap.json
# Install CNI on hosts?
# curl -L -o cni-plugins.tgz "https://github.com/containernetworking/plugins/releases/download/v1.0.0/cni-plugins-linux-$( [ $(uname -m) = aarch64 ] && echo arm64 || echo amd64)"-v1.0.0.tgz
# sudo mkdir -p /opt/cni/bin
# sudo tar -C /opt/cni/bin -xzf cni-plugins.tgz


@ -1,46 +0,0 @@
# Homelab Nomad
My configuration for creating my home Nomad cluster and deploying services to it.
This repo is not designed as a set of general-purpose templates, but rather to fit my specific needs. That said, I have made an effort to keep things as useful as possible for anyone wanting to use or modify this.
## Running
make all
## Design
Both Ansible and Terraform are used as part of this configuration. All hosts must be reachable over SSH before any of it is run.
To begin, Ansible runs a playbook to set up the cluster. This includes installing Nomad, bootstrapping the cluster and ACLs, setting up NFS shares, creating Nomad host volumes, and setting up Wesher as a WireGuard mesh between hosts.
After this is complete, Nomad variables must be set so that services can access their configuration correctly. These values should be populated based on the sample file.
Finally, the Terraform configuration can be applied, setting up all services deployed on the cluster.
The configuration of new services is intended to be as templated as possible and to avoid requiring changes in multiple places. For example, most services are configured with a template that provides reverse proxy, DNS records, database tunnels, database bootstrapping, metrics scraping, and authentication. The only real exception is backups, which for now requires a distinct job file.
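In the Makefile shown earlier in this diff, `make all` chains the `cluster`, `bootstrap-values`, and `apply` targets, which correspond to the three stages described above. A minimal sketch of the rough manual equivalent, assuming the paths and variables used in that Makefile (flags are illustrative, not a verbatim expansion of the targets):
# Sketch: approximate manual equivalent of `make all`
venv/bin/ansible-playbook -K ./ansible_playbooks/setup-cluster.yml   # provision the cluster with Ansible
env NOMAD_ADDR=http://192.168.2.101:4646 \
NOMAD_TOKEN=$(jq -r .SecretID nomad_bootstrap.json) \
venv/bin/python ./scripts/nomad_vars.py                              # populate Nomad variables
terraform apply -auto-approve \
-var "nomad_secret_id=$(jq -r .SecretID nomad_bootstrap.json)" \
-var "vault_token=$(jq -r .root_token vault-keys.json)"              # deploy services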
## What does it do?
* Nomad cluster for scheduling and configuring all services
* Blocky DNS servers with integrated ad blocking. This also provides service discovery
* Prometheus with autodiscovery of service metrics
* Loki and Promtail aggregating logs
* Minitor for service availability checks
* Grafana providing dashboards, alerting, and log searching
* Photoprism for photo management
* Remote and shared volumes over NFS
* Authelia for OIDC and Proxy based authentication with 2FA
* Sonarr and Lidarr for multimedia management
* Automated block based backups using Restic
## Step by step
1. Update hosts in `ansible_playbooks/ansible_hosts.yml`
2. Update `ansible_playbooks/setup-cluster.yml`
    1. Update backup DNS server
    2. Update NFS shares from NAS
    3. Update volumes to make sure they are valid paths
3. Create `ansible_playbooks/vars/nomad_vars.yml` based on the sample file. TODO: This is quite specific and probably impossible without more documentation
4. Run `make all` (a verification sketch follows this list)
5. Update your network DNS settings to use the new servers' IP addresses
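Once `make all` has finished, the cluster can be spot-checked with the standard Nomad CLI (not part of this repo), using the bootstrap token the setup playbook writes to nomad_bootstrap.json; the address below matches the one used in the Makefile and may differ in other environments:
# Sketch: verify the freshly bootstrapped cluster
export NOMAD_ADDR=http://192.168.2.101:4646
export NOMAD_TOKEN=$(jq -r .SecretID nomad_bootstrap.json)
nomad server members   # servers should all report alive
nomad node status      # clients should be ready
nomad job status       # lists jobs deployed by the Terraform configuration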


@ -1,21 +1,59 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.20"
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.7.0"
hashes = [
"h1:idawLPCbZgHIb+NRLJs4YdIcQgACqYiT5VwQfChkn+w=",
"zh:256b82692c560c76ad51414a2c003cadfa10338a9df333dbe22dd14a9ed16f95",
"zh:329ed8135a98bd6a000d014e40bc5981c6868cf50eedf454f1a1f72ac463bdf0",
"zh:3b32c18b492a6ac8e1ccac40d28cd42a88892ef8f3515291676136e3faac351c",
"zh:4c5ea8e80543b36b1999257a41c8b9cde852542251de82a94cff2f9d280ac2ec",
"zh:5d968ed305cde7aa3567a943cb2f5f8def54b40a2292b66027b1405a1cf28585",
"zh:60226d1a0a496a9a6c1d646800dd7e1bd1c4f5527e7307ff0bca9f4d0b5395e2",
"zh:71b11def501c994ee5305f24bd47ebfcca2314c5acca3efcdd209373d0068ac0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:89be6b5db3be473bfd14422a9abf83245c4b22ce47a8fe463bbebf8e20958ab1",
"zh:8f91051d43ae309bb8f3f6a9659f0fd26b1b239faf671c139b4e9ad0d208db05",
"zh:b5114983273d3170878f657b92738b2c40953aedeef2e1840588ecaf1bc0827e",
"zh:fd56db01c5444dc8ca2e0ad2f13fc4c17735d0fdeb5960e23176fb3f5a5114d3",
]
}


@ -1,12 +1,6 @@
namespace "*" {
policy = "write"
capabilities = ["alloc-node-exec"]
variables {
path "*" {
capabilities = ["write", "read", "destroy"]
}
}
}
agent {


@ -1,17 +0,0 @@
resource "nomad_acl_role" "admin" {
name = "admin"
description = "Nomad administrators"
policy {
name = nomad_acl_policy.admin.name
}
}
resource "nomad_acl_role" "deploy" {
name = "deploy"
description = "Authorized to conduct deployments and view logs"
policy {
name = nomad_acl_policy.deploy.name
}
}

48
acls/nomad_vault.tf (new file)

@ -0,0 +1,48 @@
# Set up nomad provider in vault for Nomad ACLs
resource "nomad_acl_token" "vault" {
name = "vault"
type = "management"
}
resource "vault_nomad_secret_backend" "config" {
backend = "nomad"
description = "Nomad ACL"
token = nomad_acl_token.vault.secret_id
default_lease_ttl_seconds = "3600"
max_lease_ttl_seconds = "7200"
ttl = "3600"
max_ttl = "7200"
}
# Vault roles generating Nomad tokens
resource "vault_nomad_secret_role" "nomad-deploy" {
backend = vault_nomad_secret_backend.config.backend
role = "nomad-deploy"
# Nomad policies
policies = ["deploy"]
}
resource "vault_nomad_secret_role" "admin-management" {
backend = vault_nomad_secret_backend.config.backend
role = "admin-management"
type = "management"
}
resource "vault_nomad_secret_role" "admin" {
backend = vault_nomad_secret_backend.config.backend
role = "admin"
# Nomad policies
policies = ["admin"]
}
# Nomad Vault token access
resource "vault_token_auth_backend_role" "nomad-cluster" {
role_name = "nomad-cluster"
token_explicit_max_ttl = 0
allowed_policies = ["access-tables", "nomad-task"]
orphan = true
token_period = 259200
renewable = true
}

17
acls/nomad_vault_db.tf (new file)

@ -0,0 +1,17 @@
# resource "vault_mount" "db" {
# path = "database"
# type = "database"
# }
#
# resource "vault_database_secret_backend_connection" "mysql" {
# backend = vault_mount.db.path
# name = "mysql"
# allowed_roles = ["accessdb"]
#
# mysql {
# # How to give access here?
# connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
# username = ""
# password = ""
# }
# }


@ -1,6 +1,38 @@
# Configure Consul provider
provider "consul" {
address = var.consul_address
}
# Get Nomad client from Consul
data "consul_service" "nomad" {
name = "nomad-client"
}
# Get Vault client from Consul
data "consul_service" "vault" {
name = "vault"
tag = "active"
}
locals {
# Get Nomad address from Consul
nomad_node = data.consul_service.nomad.service[0]
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
# Get Vault address from Consul
vault_node = data.consul_service.vault.service[0]
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
# Configure the Nomad provider
provider "nomad" {
address = var.nomad_address
address = local.nomad_node_address
secret_id = var.nomad_secret_id
region = "global"
}
# Configure the Vault provider
provider "vault" {
address = local.vault_node_address
token = var.vault_token
}


@ -1,3 +1,8 @@
variable "consul_address" {
type = string
default = "http://n1.thefij:8500"
}
variable "nomad_secret_id" {
type = string
description = "Secret ID for ACL bootstrapped Nomad"
@ -5,7 +10,8 @@ variable "nomad_secret_id" {
default = ""
}
variable "nomad_address" {
type = string
default = "http://n1.thefij:4646"
variable "vault_token" {
type = string
sensitive = true
default = ""
}

8
acls/vault_login.tf (new file)

@ -0,0 +1,8 @@
resource "vault_auth_backend" "userpass" {
type = "userpass"
tune {
max_lease_ttl = "1h"
listing_visibility = "unauth"
}
}

83
acls/vault_policies.tf (new file)

@ -0,0 +1,83 @@
resource "vault_policy" "admin" {
name = "admin"
policy = <<EOF
path "*" {
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
EOF
}
resource "vault_policy" "nomad-deploy" {
name = "nomad-deploy"
policy = <<EOH
path "nomad/creds/nomad-deploy" {
capabilities = ["read"]
}
EOH
}
# Policy for clusters
resource "vault_policy" "nomad-task" {
name = "nomad-task"
policy = <<EOH
path "kv/data/*" {
# Does this need create, update, delete?
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
# Policy for nomad tokens
resource "vault_policy" "nomad-server" {
name = "nomad-server"
policy = <<EOH
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up the token passed to Nomad to validate the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
# This section grants all access on "kv/data/*". Further restrictions can be
# applied to this broad policy.
path "kv/data/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}


@ -1,7 +0,0 @@
[defaults]
inventory=ansible_playbooks/ansible_hosts.yml
collections_paths=ansible_galaxy
roles_path=ansible_galaxy/roles
[inventory]
enable_plugins=yaml

72
ansible_hosts.yml (new file)

@ -0,0 +1,72 @@
---
all:
children:
servers:
hosts:
n1.thefij:
# consul_node_role: bootstrap
nomad_node_role: both
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: lldap-data
path: /srv/volumes/lldap
owner: "root"
group: "bin"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_node_role: both
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: gitea-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: authentik-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: immich-upload # TODO: Use NFS instead
path: /srv/volumes/immich-upload
owner: "root"
group: "bin"
mode: "0755"
read_only: false
# n3.thefij:
# nomad_node_class: ingress
# nomad_node_role: both
# pi3:
# nomad_node_role: client
# pi4:
# nomad_node_role: client
consul_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}


@ -1,72 +0,0 @@
---
all:
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances:
vars:
nomad_network_interface: eth0
children:
nomad_servers: {}
nomad_clients: {}
nomad_servers:
hosts:
nonopi.thefij:
ansible_host: 192.168.2.170
n1.thefij: {}
n2.thefij: {}
pi4: {}
qnomad.thefij: {}
nomad_clients:
hosts:
n1.thefij: {}
n2.thefij: {}
pi4: {}
qnomad.thefij: {}


@ -1,27 +0,0 @@
- name: Restart nomad and wesher
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Restart wesher
systemd:
name: wesher
state: restarted
become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad
systemd:
name: nomad
state: started
become: true


@ -1,49 +0,0 @@
---
- name: Recover Nomad
hosts: nomad_servers
any_errors_fatal: true
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id
register: nomad_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ nomad_node_id.content | b64decode }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: /var/nomad/server/raft/peers.json
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].nomad_node_id.content | b64decode }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Nomad
systemd:
name: nomad
state: restarted
become: true


@ -1,364 +0,0 @@
---
- name: Update DNS for bootstrapping with non-Nomad host
hosts: nomad_instances
become: true
gather_facts: false
vars:
non_nomad_dns: 192.168.2.170
tasks:
- name: Add non-nomad bootstrap DNS
lineinfile:
dest: /etc/resolv.conf
create: true
line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker
hosts: nomad_clients
become: true
vars:
docker_architecture_map:
x86_64: amd64
armv7l: armhf
aarch64: arm64
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
tasks:
- name: Remove snapd
package:
name: snapd
state: absent
# Not on Ubuntu 20.04
# - name: Install Podman
# hosts: nomad_instances
# become: true
#
# tasks:
# - name: Install Podman
# package:
# name: podman
# state: present
- name: Create NFS mounts
hosts: nomad_clients
become: true
vars:
shared_nfs_mounts:
- src: 192.168.2.10:/Media
path: /srv/volumes/media-read
opts: proto=tcp,port=2049,ro
- src: 192.168.2.10:/Media
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Container
path: /srv/volumes/nas-container
opts: proto=tcp,port=2049,rw
tasks:
- name: Install nfs
package:
name: nfs-common
state: present
- name: Mount NFS volumes
ansible.posix.mount:
src: "{{ item.src }}"
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs4
loop: "{{ shared_nfs_mounts + (nfs_mounts | default([])) }}"
- import_playbook: wesher.yml
- name: Build Nomad cluster
hosts: nomad_instances
any_errors_fatal: true
become: true
vars:
shared_host_volumes:
- name: media-read
path: /srv/volumes/media-write
read_only: true
- name: media-write
path: /srv/volumes/media-write
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads
path: /srv/volumes/media-write/Downloads
read_only: false
- name: sabnzbd-config
path: /srv/volumes/media-write/Downloads/sabnzbd
read_only: false
- name: photoprism-media
path: /srv/volumes/photos/Photoprism
read_only: false
- name: photoprism-storage
path: /srv/volumes/nas-container/photoprism
read_only: false
- name: nzbget-config
path: /srv/volumes/nas-container/nzbget
read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config
path: /srv/volumes/nas-container/lidarr
read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: all-volumes
path: /srv/volumes
owner: "root"
group: "root"
mode: "0755"
read_only: false
roles:
- name: ansible-nomad
vars:
nomad_version: "1.7.6-1"
nomad_install_upgrade: true
nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to
nomad_bin_dir: /usr/bin
nomad_install_from_repo: true
nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
nomad_raft_protocol: 3
nomad_autopilot: true
nomad_encrypt_enable: true
# nomad_use_consul: true
# Metrics
nomad_telemetry: true
nomad_telemetry_prometheus_metrics: true
nomad_telemetry_publish_allocation_metrics: true
nomad_telemetry_publish_node_metrics: true
# Enable container plugins
nomad_cni_enable: true
nomad_cni_version: 1.0.1
nomad_docker_enable: true
nomad_docker_dmsetup: false
# nomad_podman_enable: true
# Merge shared host volumes with node volumes
nomad_host_volumes: "{{ shared_host_volumes + (nomad_unique_host_volumes | default([])) }}"
# Customize docker plugin
nomad_plugins:
docker:
config:
allow_privileged: true
gc:
image_delay: "24h"
volumes:
enabled: true
selinuxlabel: "z"
# Send logs to journald so we can scrape them for Loki
# logging:
# type: journald
extra_labels:
- "job_name"
- "job_id"
- "task_group_name"
- "task_name"
- "namespace"
- "node_name"
- "node_id"
# Bind nomad
nomad_bind_address: 0.0.0.0
# Default interface for binding tasks
# This is now set at the inventory level
# nomad_network_interface: eth0
# Create networks for binding task ports
nomad_host_networks:
- name: loopback
interface: lo
reserved_ports: "22"
- name: wesher
interface: wgoverlay
reserved_ports: "22"
# Enable ACLs
nomad_acl_enabled: true
nomad_config_custom:
ui:
enabled: true
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_servers
tasks:
- name: Start Nomad
systemd:
state: started
name: nomad
- name: Nomad API reachable?
uri:
url: "http://127.0.0.1:4646/v1/status/leader"
method: GET
status_code: 200
register: nomad_check_result
retries: 8
until: nomad_check_result is succeeded
delay: 15
changed_when: false
run_once: true
- name: Bootstrap ACLs
command:
argv:
- "nomad"
- "acl"
- "bootstrap"
- "-json"
run_once: true
ignore_errors: true
register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result
copy:
content: "{{ bootstrap_result.stdout }}"
dest: "../nomad_bootstrap.json"
when: bootstrap_result is succeeded
delegate_to: localhost
run_once: true
- name: Read secret
command:
argv:
- jq
- -r
- .SecretID
- ../nomad_bootstrap.json
delegate_to: localhost
run_once: true
no_log: true
changed_when: false
register: read_secretid
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
register: policies
run_once: true
changed_when: false
- name: Copy policy
copy:
src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true
register: anon_policy
- name: Create anon-policy
command:
argv:
- nomad
- acl
- policy
- apply
- -description="Anon read only"
- anonymous
- /tmp/anonymous.policy.hcl
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
when: policies.stdout == "No policies found" or anon_policy.changed
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault
# community.general.terraform:
# project_path: ../acls
# force_init: true
# variables:
# consul_address: "{{ play_hosts[0] }}:8500"
# vault_token: "{{ root_token }}"
# nomad_secret_id: "{{ read_secretid.stdout }}"
# delegate_to: localhost
# run_once: true
# notify:
# - Restart Nomad
handlers:
- name: Restart Nomad
systemd:
state: restarted
name: nomad
retries: 6
delay: 5


@ -1,10 +0,0 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true


@ -1,159 +0,0 @@
nomad/jobs:
base_hostname: VALUE
db_user_ro: VALUE
ldap_base_dn: VALUE
notify_email: VALUE
nomad/jobs/authelia:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
email_sender: VALUE
jwt_secret: VALUE
oidc_clients: VALUE
oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE
oidc_issuer_private_key: VALUE
session_secret: VALUE
storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/backup:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/blocky:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
mappings: VALUE
whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/ddclient:
domain: VALUE
domain_ddclient: VALUE
zone: VALUE
nomad/jobs/diun:
slack_hook_url: VALUE
nomad/jobs/git:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
secret_key: VALUE
smtp_sender: VALUE
nomad/jobs/grafana:
admin_pw: VALUE
alert_email_addresses: VALUE
db_name: VALUE
db_pass: VALUE
db_pass_ro: VALUE
db_user: VALUE
db_user_ro: VALUE
minio_access_key: VALUE
minio_secret_key: VALUE
oidc_secret: VALUE
slack_bot_token: VALUE
slack_bot_url: VALUE
slack_hook_url: VALUE
smtp_password: VALUE
smtp_user: VALUE
nomad/jobs/immich:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lego:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/lidarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lldap:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
jwt_secret: VALUE
key_seed: VALUE
smtp_from: VALUE
smtp_reply_to: VALUE
nomad/jobs/minitor:
mailgun_api_key: VALUE
nomad/jobs/mysql-server:
mysql_root_password: VALUE
nomad/jobs/photoprism:
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/postgres-server:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/radarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia:
allowed_psks: VALUE
nomad/jobs/redis-blocky:
allowed_psks: VALUE
nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE
nomad/jobs/sonarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/traefik:
external: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/oidc:
secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql:
mysql_root_password: VALUE
secrets/postgres:
superuser: VALUE
superuser_pass: VALUE
secrets/smtp:
password: VALUE
port: VALUE
server: VALUE
tls: VALUE
user: VALUE


@ -1,2 +0,0 @@
---
wesher_key: "{{ vault_wesher_key }}"


@ -1,53 +0,0 @@
- name: Create overlay network
hosts: nomad_instances
become: true
vars_files:
- vars/wesher_vars.yml
- vars/vault_wesher_vars.yml
vars:
wesher_key: "{{ wesher_key }}"
wesher_version: v0.2.6
wesher_arch_map:
x86_64: amd64
armv7l: arm
aarch64: arm64
wesher_arch: "{{ wesher_arch_map[ansible_architecture] }}"
# wesher_sha256_map:
# x86_64: 8c551ca211d7809246444765b5552a8d1742420c64eff5677d1e27a34c72aeef
# armv7l: 97f5bbf2b00b8b11a4ca224540bf9c1affdb15432c3b6ad8da4c1a7b6175eb5d
# aarch64: 507c6397d67ea90bddb3e1c06ec9d8e38d4342ed6f0f6b47855fecc9f1d6fae0
# wesher_checksum: sha256:{{ wesher_sha256_map[ansible_architecture] }}
wesher_checksum: sha256:https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher.sha256sums
tasks:
- name: Download wesher
get_url:
url: https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher-{{ wesher_arch }}
dest: /usr/local/sbin/wesher
checksum: "{{ wesher_checksum }}"
owner: root
mode: "0755"
- name: Install systemd unit
get_url:
url: https://github.com/costela/wesher/raw/{{ wesher_version }}/dist/wesher.service
dest: /etc/systemd/system/wesher.service
- name: Write wesher config
lineinfile:
path: /etc/default/wesher
create: true
regexp: "^{{ item.split('=')[0] }}"
line: "{{ item }}"
owner: root
mode: "0600"
loop:
- WESHER_CLUSTER_KEY={{ wesher_key }}
- WESHER_JOIN={% for host in ansible_play_hosts %}{{ hostvars[host].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %}
- name: Start wesher
systemd:
name: wesher.service
daemon_reload: true
state: started
enabled: true


@ -2,39 +2,20 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
version = "1.4.16"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,6 +1,5 @@
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
datacenters = ["dc1"]
priority = 90
%{ if batch_node == null ~}
type = "system"
%{ else ~}
@ -18,7 +17,14 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
}
%{ endif ~}
%{ if batch_node != null ~}
%{ if batch_node == null ~}
constraint {
attribute = "$${node.unique.name}"
operator = "set_contains_any"
# Only deploy to nodes running tasks to backup
value = "n1,n2"
}
%{ else ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
@ -31,9 +37,6 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
mode = "bridge"
port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
@ -44,26 +47,42 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
source = "all-volumes"
}
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service {
name = "backup"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape"
]
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "$${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
shutdown_delay = "5m"
volume_mount {
volume = "all-volumes"
destination = "/data"
@ -71,171 +90,93 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
}
config {
image = "iamthefij/resticscheduler:0.4.0"
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
"--snapshot",
"$${NOMAD_META_snapshot}",
"--push-gateway",
"http://pushgateway.nomad:9091",
%{ endif ~}
"$${NOMAD_TASK_DIR}/node-jobs.hcl",
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/resticscheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
env = {
RCLONE_CHECKERS = "2"
RCLONE_TRANSFERS = "2"
RCLONE_FTP_CONCURRENCY = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
"MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
}
template {
# Probably want to use database credentials that have access to dump all tables
data = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
{{ with nomadVar "secrets/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ with secret "kv/data/nextcloud" -}}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ with nomadVar "secrets/postgres" }}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
{{ with secret "kv/data/backups" -}}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}}
EOF
destination = "secrets/db.env"
env = true
}
template {
data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
destination = "local/consul.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~}
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/${job_file}")}
# Current node is {{ env "node.unique.name" }}
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${file("${module_path}/jobs/consul.hcl")}
{{ end -}}
{{ range service "nextcloud" -}}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nextcloud.hcl")}
{{ end -}}
{{ end -}}
{{ range service "lldap" -}}
# Lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{ end -}}
{{ end -}}
%{ endfor ~}
# Dummy job to keep task healthy on node without any stateful services
job "Dummy" {
schedule = "@daily"
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
EOF
destination = "local/node-jobs.hcl"
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 500
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
memory = 256
}
}
}


@ -1,136 +1,14 @@
resource "nomad_job" "backup" {
resource "nomad_job" "backups" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
module_path = "${path.module}",
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
resource "nomad_job" "backups-oneoff" {
for_each = toset(["n1", "n2", "n3"])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
module_path = "${path.module}",
batch_node = each.key,
use_wesher = var.use_wesher
})
}
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-n2", "backup-oneoff-pi4"])
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
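
A sketch of the dynamic form hinted at in the NOTE above the all_job_ids local: once the backup jobs exist in the cluster, the hard-coded ID list could be derived from the job resources themselves. This assumes the resource names nomad_job.backups and nomad_job.backups-oneoff from this file and is only usable after the first deploy.

locals {
  # Hypothetical replacement for the hard-coded all_job_ids above
  all_job_ids_dynamic = toset(flatten([
    [for job in resource.nomad_job.backups-oneoff : job.id],
    [resource.nomad_job.backups.id],
  ]))
}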

View File

@ -1,54 +0,0 @@
job "authelia" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local authelia dir" {
pre_script {
on_backup = "mkdir -p /local/authelia"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "authelia"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/authelia/dump.sql"
}
}
backup {
paths = ["/local/authelia"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

33
backups/jobs/consul.hcl Normal file
View File

@ -0,0 +1,33 @@
job "Consul" {
schedule = "0 * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/consul"
passphrase = env("BACKUP_PASSPHRASE")
}
task "Use consul snapshots" {
pre_script {
on_backup = "mkdir -p /local/consul"
}
pre_script {
on_backup = "consul snapshot save /local/consul/backup.snap"
}
post_script {
on_restore = "consul snapshot restore /local/consul/backup.snap"
}
}
backup {
paths = ["/local/consul"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,57 +0,0 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,54 +0,0 @@
job "grafana" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local grafana dir" {
pre_script {
on_backup = "mkdir -p /local/grafana"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "grafana"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/grafana/dump.sql"
}
}
backup {
paths = ["/local/grafana"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,64 +0,0 @@
job "lidarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/lidarr"]
backup_opts {
Exclude = [
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -2,52 +2,26 @@ job "lldap" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
repo = "rclone::ftp,env_auth:/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local backup dir" {
pre_script {
on_backup = "mkdir -p /local/lldap"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
database = "lldap"
no_tablespaces = true
dump_to = "/local/lldap/dump.sql"
}
}
# sqlite "Backup database" {
# path = "/data/lldap/users.db"
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
# dump_to = "/data/lldap/users.db.bak"
# }
backup {
paths = ["/local/lldap"]
backup_opts {
Host = "nomad"
}
paths = ["/data/lldap"]
# Because path is absolute
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -0,0 +1,31 @@
job "Nextcloud" {
schedule = "0 * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/nextcloud"
passphrase = env("BACKUP_PASSPHRASE")
}
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = env("MYSQL_DATABASE")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/dump.sql"
}
backup {
paths = ["/data/nextcloud"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,41 +0,0 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = [
# Configuration
"/data/nas-container/nzbget",
# Queued nzb files
"/data/media-write/Downloads/nzb",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,60 +0,0 @@
job "photoprism" {
schedule = "10 * * * *"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local photoprism dir" {
pre_script {
on_backup = "mkdir -p /local/photoprism"
}
}
task "Dump database" {
mysql "Dump database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "photoprism"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/photoprism/dump.sql"
}
}
backup {
paths = [
"/local/photoprism",
"/data/nas-container/photoprism",
]
backup_opts {
Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,64 +0,0 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,36 +0,0 @@
job "sabnzbd" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = ["/data/media-write/Downloads/sabnzbd"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,67 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -0,0 +1,20 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

115
blocky/blocky.nomad Normal file
View File

@ -0,0 +1,115 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" {
datacenters = ["dc1"]
type = "system"
priority = 100
update {
max_parallel = 1
auto_revert = true
}
group "blocky" {
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
host_network = "loopback"
to = "4000"
}
}
service {
name = "blocky-dns"
port = "dns"
}
service {
name = "blocky-api"
port = "api"
meta {
metrics_addr = "${NOMAD_ADDR_api}"
}
tags = [
"traefik.enable=true",
]
connect {
sidecar_service {
proxy {
local_service_port = 4000
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 4000
listener_port = "api"
}
}
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
}
task "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky"
ports = ["dns", "api"]
mount {
type = "bind"
target = "/app/config.yml"
source = "app/config.yml"
}
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
template {
data = var.config_data
destination = "app/config.yml"
splay = "1m"
}
}
}
}

25
blocky/blocky.tf Normal file
View File

@ -0,0 +1,25 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals {
config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = "${var.base_hostname}",
}
)
}
resource "nomad_job" "blocky" {
hcl2 {
enabled = true
vars = {
"config_data" = "${local.config_data}",
}
}
jobspec = file("${path.module}/blocky.nomad")
}

82
blocky/config.yml Normal file
View File

@ -0,0 +1,82 @@
upstream:
default:
- 1.1.1.1
- 1.0.0.1
quad9:
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
conditional:
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://hosts-file.net/ad_servers.txt
smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
- https://perflyst.github.io/PiHoleBlocklist/regex.list
malware:
- https://mirror1.malwaredomains.com/files/justdomains
whiteLists:
# Move to Gitea when deployed internally
ads:
{{ keyOrDefault "blocky/whitelists/ads" "# None" | indent 6 }}
clientGroupsBlock:
default:
- ads
- malware
- smarttv
customDNS:
customTTL: 1h
mapping:
{{ with service "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
{{ keyOrDefault "global/base_hostname" "${base_hostname}" }}: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
# Other mappings
{{ keyOrDefault "blocky/mappings" "# None" | indent 4 }}
prometheus:
enable: true
redis:
address: {{ env "NOMAD_UPSTREAM_ADDR_redis" }}
# password: ""
# database: 0
connectionAttempts: 10
connectionCooldown: 3s
# queryLog:
# type: mysql
# target: db_user:db_password@tcp(db_host_or_ip:3306)/db_user?charset=utf8mb4&parseTime=True&loc=Local
# logRetentionDays: 7
port: 53
httpPort: 4000

80
bootstrap-values.yml Normal file
View File

@ -0,0 +1,80 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap Vault values
hosts: vault_instances
gather_facts: false
vars_files:
- ./vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
- name: Check mount
community.hashi_vault.vault_read:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 5
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"

View File

@ -1,5 +1,26 @@
# Stops Nomad and clears all data from its data dirs
---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: vault
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data
hosts: nomad_instances

View File

@ -0,0 +1,4 @@
---
collections:
- name: community.hashi_vault
version: 3.0.0

View File

@ -0,0 +1,4 @@
consul_values:
"blocky/whitelists/ads": |
- |
somedomain.com

134
core.tf Normal file
View File

@ -0,0 +1,134 @@
module "databases" {
source = "./databases"
}
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
base_hostname = var.base_hostname
}
module "nomad_login" {
source = "./levant"
template_path = "service.nomad"
variables = {
name = "nomad-login"
image = "iamthefij/nomad-vault-login"
service_port = 5000
ingress = true
ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
env = jsonencode({
VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
})
}
}
module "metrics" {
source = "./metrics"
}
module "loki" {
source = "./levant"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("./loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
}
resource "consul_config_entry" "loki_intent" {
name = "loki"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "promtail"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "syslogng-promtail"
Precedence = 9
Type = "consul"
},
]
})
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}
resource "consul_config_entry" "syslogng_promtail_intent" {
name = "syslogng-promtail"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "syslogng"
Precedence = 9
Type = "consul"
},
]
})
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}
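
The nomad_login and loki modules above both use the generic ./levant module with the service.nomad template; a minimal sketch of wiring up one more service the same way (the service name, image, and port here are hypothetical):

module "whoami" {
  source        = "./levant"
  template_path = "service.nomad"

  variables = {
    name         = "whoami"
    image        = "traefik/whoami:latest"
    service_port = 80
    ingress      = true
  }
}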

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1"
hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

View File

@ -1,185 +0,0 @@
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:4.37"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9999
service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
use_smtp = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = "nomad"
oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
bound_audiences = ["nomad"]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}
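
The bind_name values above reference ACL roles defined elsewhere (see the commented acls.nomad_acl_role hints); a minimal sketch of what such a role might look like, assuming an admin policy is also managed in Terraform:

resource "nomad_acl_policy" "admin" {
  name        = "admin"
  description = "Full access for administrators"
  rules_hcl   = <<EOH
namespace "default" {
  policy = "write"
}
EOH
}

resource "nomad_acl_role" "admin" {
  name        = "admin"
  description = "Role bound to the admin policy"

  policy {
    name = nomad_acl_policy.admin.name
  }
}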

View File

@ -1,252 +0,0 @@
theme: auto
# jwt_secret: <file>
{{ with nomadVar "nomad/jobs" }}
default_redirection_url: https://authelia.{{ .base_hostname }}/
{{ end }}
## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
## disabled. This setting must be a method that is enabled.
## Options are totp, webauthn, mobile_push.
default_2fa_method: ""
server:
host: 0.0.0.0
port: {{ env "NOMAD_PORT_main" }}
disable_healthcheck: false
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
format: json
telemetry:
metrics:
enabled: false
# address: '0.0.0.0:{{ env "NOMAD_PORT_metrics" }}'
totp:
disable: false
issuer: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
digits: 6
## The TOTP algorithm to use.
## It is CRITICAL you read the documentation before changing this option:
## https://www.authelia.com/c/totp#algorithm
algorithm: sha1
webauthn:
disable: false
timeout: 60s
display_name: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
user_verification: preferred
duo_api:
disable: true
# hostname:
# integration_key:
# secret_key:
# enable_self_enrollment: false
authentication_backend:
disable_reset_password: false
## Password Reset Options.
password_reset:
## External reset password url that redirects the user to an external reset portal. This disables the internal reset
## functionality.
# TODO: not sure if this is needed, probably not?
custom_url: ""
refresh_interval: 5m
ldap:
implementation: custom
# stunnel url
url: ldap://127.0.0.1:389
timeout: 5s
# TODO: Maybe use stunnel for this
start_tls: false
base_dn: {{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
additional_users_dn: ou=people
additional_groups_dn: ou=groups
username_attribute: uid
group_name_attribute: cn
mail_attribute: mail
display_name_attribute: displayName
# To allow sign in both with username and email, one can use a filter like
# (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
users_filter: "(&({username_attribute}={input})(objectClass=person))"
# Only supported filter by lldap right now
groups_filter: (member={dn})
## The username and password of the admin user.
{{ with nomadVar "secrets/ldap" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }}
# password set using secrets file
# password: <secret>
password_policy:
standard:
enabled: false
min_length: 8
max_length: 0
require_uppercase: true
require_lowercase: true
require_number: true
require_special: true
zxcvbn:
enabled: false
min_score: 3
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 192.168.1.0/24
- 192.168.2.0/24
- 192.168.10.0/24
- name: VPN
networks: [192.168.5.0/24]
rules:
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks:
- internal
policy: one_factor
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
- domain:
# TODO: Drive these from Nomad variables
- 'secure.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
session:
## The name of the session cookie.
name: authelia_session
domain: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Stored in a secrets file
# secret: <in file>
expiration: 1h
inactivity: 5m
remember_me_duration: 1M
redis:
host: 127.0.0.1
port: 6379
# username: authelia
# password: authelia
# database_index: 0
maximum_active_connections: 8
minimum_idle_connections: 0
regulation:
max_retries: 3
find_time: 2m
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
# encryption_key: <in file>
##
## MySQL / MariaDB (Storage Provider)
##
mysql:
host: 127.0.0.1
port: 3306
{{ with nomadVar "nomad/jobs/authelia" }}
database: {{ .db_name }}
username: {{ .db_user }}
# password: <in_file>
{{- end }}
timeout: 5s
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a Webauthn registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: false
{{ with nomadVar "secrets/smtp" }}
smtp:
host: {{ .server }}
port: {{ .port }}
username: {{ .user }}
# password: <in file>
{{- end }}
{{ with nomadVar "nomad/jobs/authelia" }}
sender: "{{ .email_sender }}"
## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
subject: "[Authelia] {title}"
## This address is used during the startup check to verify the email configuration is correct.
## It's not important what it is except if your email server only allows local delivery.
startup_check_address: test@iamthefij.com
{{- end }}
identity_providers:
oidc:
# hmac_secret: <file>
# issuer_private_key: <file>
clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

View File

@ -1,229 +0,0 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" {
datacenters = ["dc1"]
type = "system"
priority = 100
update {
max_parallel = 1
# TODO: maybe switch to service job from system so we can use canary and autorollback
# auto_revert = true
}
group "blocky" {
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = "4000"
}
dns {
# Set explicit DNS servers because, by default, tasks would use this task (blocky) itself for DNS
servers = [
"192.168.2.1",
]
}
}
service {
name = "blocky-dns"
provider = "nomad"
port = "dns"
}
service {
name = "blocky-api"
provider = "nomad"
port = "api"
tags = [
"prometheus.scrape",
"traefik.enable=true",
"traefik.http.routers.blocky-api.entryPoints=websecure",
]
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
}
task "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky:v0.22"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"]
}
resources {
cpu = 50
memory = 75
memory_max = 150
}
template {
data = var.config_data
destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ range nomadServices }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") .Name -}}
{{ .Address }} {{ .Name }}.nomad
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 20
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" }}
password={{ .mysql_root_password }}
{{ end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/blocky" }}{{ if .db_name -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
{{ with nomadService "grafana" }}{{ with nomadVar "nomad/jobs" -}}
-- Grant grafana read_only user access to db
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
{{ end }}{{ end -}}
{{ else -}}
SELECT 'NOOP';
{{ end -}}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,68 +0,0 @@
locals {
config_data = file("${path.module}/config.yml")
}
resource "nomad_job" "blocky" {
hcl2 {
vars = {
"config_data" = local.config_data,
}
}
jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
}

View File

@ -1,148 +0,0 @@
ports:
dns: 53
http: 4000
bootstrapDns:
- upstream: 1.1.1.1
- upstream: 1.0.0.1
- upstream: 9.9.9.9
- upstream: 149.112.112.112
upstreams:
groups:
default:
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one
cloudflare:
- 1.1.1.1
- 1.0.0.1
- 2606:4700:4700::1111
- 2606:4700:4700::1001
- https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one
quad9:
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-secured:
- 9.9.9.11
- 149.112.112.11
- 2620:fe::11
- 2620:fe::fe:11
- https://dns11.quad9.net/dns-query
- tcp-tls:dns11.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
conditional:
fallbackUpstream: false
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
hostsFile:
sources:
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s
loading:
refreshPeriod: 30s
clientLookup:
upstream: 192.168.2.1
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt
smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
# - https://perflyst.github.io/PiHoleBlocklist/regex.list
wemo:
- |
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com
antisocial:
- |
facebook.com
instagram.com
reddit.com
twitter.com
youtube.com
custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/block
whiteLists:
ads:
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }}
{{- end }}
custom:
- https://git.thefij.rocks/iamthefij/blocklists/raw/branch/main/allow
clientGroupsBlock:
default:
- ads
- custom
- smarttv
- wemo
customDNS:
customTTL: 1h
mapping:
{{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings.Value | indent 4 }}{{ end }}
# Catch all at top domain to traefik
{{ with nomadService "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
prometheus:
enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
redis:
address: 127.0.0.1:6379
# password: ""
# database: 0
connectionAttempts: 10
connectionCooldown: 3s
{{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}}
queryLog:
type: mysql
target: {{ .db_user }}:{{ .db_pass }}@tcp(127.0.0.1:3306)/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
logRetentionDays: 14
{{ end -}}
{{ end -}}

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,293 +0,0 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
]
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
{{ if .db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.* to '{{ .db_user }}'@'%';
-- Create Read Only user
CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:10.0.10"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"]
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
}
template {
data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
{{ if .db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }}
{{ end -}}
SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
resources {
cpu = 100
memory = 200
}
}
task "grafana-reprovisioner" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/startup.sh"]
}
resources {
cpu = 100
memory = 100
}
env = {
LOG_FILE = "/var/log/grafana_reloader.log"
}
template {
data = <<EOF
#! /bin/sh
apk add curl
touch "$LOG_FILE"
exec tail -f "$LOG_FILE"
EOF
perms = "777"
destination = "$${NOMAD_TASK_DIR}/startup.sh"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
{{ end -}}
EOF
env = true
destination = "$${NOMAD_SECRETS_DIR}/conf.env"
}
template {
data = <<EOF
#! /bin/sh
exec > "$LOG_FILE"
exec 2>&1
GRAFANA_URL=http://127.0.0.1:3000
echo "Reload dashboards"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/dashboards/reload
echo "Reload datasources"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/datasources/reload
echo "Reload plugins"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/plugins/reload
echo "Reload notifications"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/notifications/reload
echo "Reload access-control"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/access-control/reload
echo "Reload alerting"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/alerting/reload
EOF
change_mode = "noop"
perms = "777"
destination = "$${NOMAD_TASK_DIR}/reload_config.sh"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
destination = "$${NOMAD_ALLOC_DIR}/config/${config_file}"
perms = 777
# Set owner to grafana uid
# uid = 472
# Change template delimiters for dashboard files that use JSON and contain double curly braces and square brackets
%{ if endswith(config_file, ".json") ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
change_mode = "script"
change_script {
command = "/local/reload_config.sh"
}
}
%{ endfor }
}
task "grafana-image-renderer" {
driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]
}
env = {
"RENDERING_MODE" = "clustered"
"RENDERING_CLUSTERING_MODE" = "browser"
"RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5
"RENDERING_CLUSTERING_TIMEOUT" = 30
}
}
}
}

View File

@ -1,882 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "Query report for blocky (MySQL)",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 14980,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 0
},
"id": 14,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.1.2",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.response_type, t.request_Ts as time, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.response_type\n order by t.request_Ts",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by response type",
"transformations": [],
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 0
},
"id": 16,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.request_ts AS time,\n case when t.reason like 'BLOCKED%' then SUBSTRING_INDEX(SUBSTRING_INDEX(t.reason,'(',-1), ')',1) else '' end AS metric,\n count(t.reason) AS cnt\nFROM log_entries t\nWHERE t.response_type ='BLOCKED'\n AND $__timeFilter(t.request_Ts)\n AND t.client_name in ($client_name)\n AND (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 2\nORDER BY time",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Blocked by Blacklist",
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_1",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 13,
"links": [],
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": true
},
"showUnfilled": true,
"text": {}
},
"pluginVersion": "9.2.4",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "f",
"rawQuery": true,
"rawSql": "SELECT t.request_Ts as time, t.client_name as metric, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.client_name\n order by 3 desc",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by client",
"transformations": [],
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Top 20 effective top level domain plus one more label",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 11,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.effective_tldp as metric, count(*) as value from log_entries t \nWHERE $__timeFilter(t.request_Ts) \n and t.response_type in ($response_type) \n and t.client_name in ($client_name) \n and (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.effective_tldp order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 effective TLD+1",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 8,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.question_name as metric, count(*) as value from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.question_name order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 queried domains",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 16
},
"id": 12,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n $__timeGroupAlias(t.request_Ts, '30m'),\n t.client_name as metric,\n count(*) as c\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 1,2\nORDER BY 1",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Queries number per client (30m)",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "dtdurationms"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 23
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n UNIX_TIMESTAMP(t.request_Ts) as time,\n t.duration_ms\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nORDER BY request_ts",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query duration",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Last 100 queries, newest on top",
"fieldConfig": {
"defaults": {
"custom": {
"displayMode": "auto",
"filterable": false,
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "time"
},
"properties": [
{
"id": "unit",
"value": "dateTimeAsIsoNoDateIfToday"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 31
},
"id": 4,
"options": {
"footer": {
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT UNIX_TIMESTAMP(t.request_Ts) as \"time\", \n t.client_ip as \"client IP\", \n t.client_name as \"client name\", \n t.duration_ms as \"duration in ms\", \n t.response_type as \"response type\", \n t.question_type as \"question type\", \n t.question_name as \"question name\", \n t.effective_tldp as \"effective TLD+1\", \n t.answer as \"answer\" from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n order by t.request_Ts desc limit 100",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Last queries",
"type": "table"
}
],
"refresh": "",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": "",
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct client_name from log_entries",
"hide": 0,
"includeAll": true,
"label": "Client name",
"multi": true,
"name": "client_name",
"options": [],
"query": "select distinct client_name from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct response_type from log_entries",
"hide": 0,
"includeAll": true,
"label": "Response type",
"multi": true,
"name": "response_type",
"options": [],
"query": "select distinct response_type from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"hide": 0,
"label": "Domain (contains)",
"name": "question",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Blocky query",
"uid": "AVmWSVWgz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large


@ -1,411 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"fixedColor": "rgb(31, 120, 193)",
"mode": "fixed"
},
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#d44a3a",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 0
},
{
"color": "#299c46",
"value": 1
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 0,
"y": 0
},
"id": 2,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "none",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)",
"format": "time_series",
"instant": true,
"intervalFactor": 2,
"refId": "A"
}
],
"title": "Total Monitors",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"decimals": 0,
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#299c46",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 1
},
{
"color": "#d44a3a",
"value": 2
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 4,
"y": 0
},
"id": 6,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)-count(minitor_monitor_up_count>=1)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"refId": "A"
}
],
"title": "Total Down Services",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 0,
"spanNulls": 900000
},
"mappings": [
{
"options": {
"0": {
"index": 1,
"text": "Down"
},
"1": {
"index": 0,
"text": "Up"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "red",
"value": 0
},
{
"color": "green",
"value": 1
}
]
},
"unit": "bool_on_off"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 14,
"x": 8,
"y": 0
},
"id": 4,
"links": [],
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "never",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"editorMode": "code",
"exemplar": false,
"expr": "sum(minitor_monitor_up_count) by (monitor)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"range": true,
"refId": "A"
}
],
"title": "Service Status",
"type": "state-timeline"
},
{
"columns": [],
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fontSize": "100%",
"gridPos": {
"h": 11,
"w": 22,
"x": 0,
"y": 7
},
"id": 8,
"links": [],
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"styles": [
{
"alias": "Time",
"align": "auto",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "Status",
"align": "auto",
"colorMode": "cell",
"colors": [
"#F2495C",
"#F2495C",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 0,
"link": false,
"mappingType": 1,
"pattern": "Value",
"thresholds": [
"0",
"1"
],
"type": "string",
"unit": "short",
"valueMaps": [
{
"text": "Ok",
"value": "1"
},
{
"text": "Not Ok",
"value": "0"
}
]
}
],
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "minitor_monitor_up_count",
"format": "time_series",
"instant": true,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"refId": "A"
}
],
"title": "Monitor status",
"transform": "timeseries_to_rows",
"type": "table-old"
}
],
"refresh": "30s",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Minitor Monitor",
"uid": "qoE5Hrxiz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,8 +0,0 @@
apiVersion: 1
providers:
- name: default
folder: 'Provisioned'
type: file
disableDeletion: false
options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default


@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox


@ -1,16 +0,0 @@
---
apiVersion: 1
datasources:
- name: Blocky logs
url: 127.0.0.1:3306
# TODO: Looking for an acl friendly way to expose this since it's a variable in blocky setup
database: blocky
type: mysql
isDefault: false
version: 1
{{ with nomadVar "nomad/jobs/grafana" -}}
user: {{ .db_user_ro }}
secureJsonData:
password: {{ .db_pass_ro }}
{{- end }}


@ -1,97 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
# image = "alpine:3"
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f /.lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}


@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}


@ -1,41 +0,0 @@
auth_enabled: false
server:
http_listen_port: 3100
common:
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
replication_factor: 1
path_prefix: /tmp/loki
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 168h
table_manager:
retention_deletes_enabled: true
retention_period: 168h


@ -1,24 +0,0 @@
module "loki" {
source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}


@ -1,27 +0,0 @@
module "blocky" {
source = "./blocky"
use_wesher = var.use_wesher
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
}
resource "nomad_job" "nomad-client-stalker" {
# The stalker job allows using the Nomad service registry to identify Nomad client hosts
jobspec = file("${path.module}/nomad-client-stalker.nomad")
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}


@ -1,95 +0,0 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}


@ -1,32 +0,0 @@
job "nomad-client-stalker" {
type = "system"
group "main" {
network {
mode = "host"
port "main" {}
}
service {
name = "nomad-client-stalker"
provider = "nomad"
port = "main"
}
task "main" {
driver = "docker"
config {
image = "busybox"
args = ["tail", "-f", "/dev/null"]
}
resources {
cpu = 10
memory = 15
memory_max = 30
}
}
}
}


@ -1,145 +0,0 @@
job "prometheus" {
datacenters = ["dc1"]
group "prometheus" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9090
}
port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 9091
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "prometheus"
provider = "nomad"
port = "web"
// TODO: Remove traefik tags
tags = [
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
}
service {
name = "pushgateway"
provider = "nomad"
port = "pushgateway"
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.43.0"
ports = ["web"]
args = [
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
]
}
template {
data = <<EOF
---
global:
scrape_interval: 30s
evaluation_interval: 3s
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 127.0.0.1:9090
- job_name: "pushgateway"
honor_labels: true
static_configs:
- targets:
- 127.0.0.1:9091
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
nomad_sd_configs:
# TODO: Use NOMAD_SECRETS_DIR/api.sock and workload identity when
# workload ACLs can be set using terraform
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_service]
regex: nomad-client-stalker
action: keep
- source_labels: [__meta_nomad_address]
replacement: "$1:4646"
target_label: __address__
- job_name: "nomad_services"
metrics_path: "/metrics"
nomad_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_tags]
regex: .*(prometheus.scrape).*
action: keep
- source_labels: [__meta_nomad_service_address,__meta_nomad_service_port]
separator: ":"
target_label: __address__
- source_labels: [__meta_nomad_service]
target_label: nomad_service
- source_labels: [__meta_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_nomad_node_id]
target_label: nomad_node_id
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml"
}
resources {
cpu = 100
memory = 300
}
}
task "pushgateway" {
driver = "docker"
config {
image = "prom/pushgateway"
ports = ["pushgateway"]
args = [
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence",
]
}
resources {
cpu = 50
memory = 50
}
}
}
}


@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0"
hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9",
]
}


@ -1,314 +0,0 @@
job "traefik" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
attribute = "${node.class}"
value = "ingress"
}
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
# canary = 1
# auto_promote = true
auto_revert = true
}
group "traefik" {
count = 2
network {
port "web" {
static = 80
}
port "websecure" {
static = 443
}
port "syslog" {
static = 514
}
port "gitssh" {
static = 2222
}
port "metrics" {}
dns {
servers = [
"192.168.2.101",
"192.168.2.102",
"192.168.2.30",
]
}
}
ephemeral_disk {
migrate = true
sticky = true
}
task "traefik" {
driver = "docker"
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
}
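# The "prometheus.scrape" tag is what the prometheus job's nomad_services scrape
# config keys on (a relabel "keep" on __meta_nomad_tags) to discover this
# metrics endpoint.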
config {
image = "traefik:2.10"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host"
mount {
type = "bind"
target = "/etc/traefik"
source = "local/config"
}
mount {
type = "bind"
target = "/etc/traefik/usersfile"
source = "secrets/usersfile"
}
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<"
right_delimiter = ">>"
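# With these delimiters, values are injected below as << env "NOMAD_PORT_metrics" >>
# and << with nomadVar "nomad/jobs" >>, while Traefik's own Go-template rule
# "{{normalize .Name}}" passes through to the rendered TOML untouched.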
data = <<EOH
[log]
level = "DEBUG"
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web.http]
[entryPoints.web.http.redirections]
[entryPoints.web.http.redirections.entrypoint]
to = "websecure"
scheme = "https"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure.http.tls]
[entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>"
[entryPoints.syslogtcp]
address = ":514"
[entryPoints.syslogudp]
address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api]
dashboard = true
[ping]
entrypoint = "web"
[metrics]
[metrics.prometheus]
entrypoint = "metrics"
# manualRouting = true
[providers.file]
directory = "/etc/traefik/conf"
watch = true
[providers.nomad]
exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint]
address = "http://127.0.0.1:4646"
EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
}
template {
data = <<EOH
[http]
[http.routers]
[http.routers.nomad]
entryPoints = ["websecure"]
service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
{{ range $service, $url := . }}
[http.routers.{{ $service }}]
entryPoints = ["websecure"]
service = "{{ $service }}"
rule = "Host(`{{ $service }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ end }}
{{- end }}{{ end }}{{ end }}
[http.services]
[http.services.nomad]
[http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646"
{{ with nomadVar "nomad/jobs/traefik" }}{{ with .external }}{{ with .Value | parseYAML -}}
{{ range $service, $url := . }}
[http.services.{{ $service }}]
[http.services.{{ $service }}.loadBalancer]
[[http.services.{{ $service }}.loadBalancer.servers]]
url = "{{ $url }}"
{{ end }}
{{- end }}{{ end }}{{ end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
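# The "external" variable consumed above is expected to parse as a YAML map of
# service name to upstream URL; each entry becomes a router/service pair.
# Hypothetical shape (example values only):
#   proxmox: "https://192.168.2.10:8006"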
template {
data = <<EOH
{{ with nomadService "syslogng" -}}
[tcp.routers]
[tcp.routers.syslogtcp]
entryPoints = ["syslogtcp"]
service = "syslogngtcp"
rule = "HostSNI(`*`)"
[tcp.services]
[tcp.services.syslogngtcp]
[tcp.services.syslogngtcp.loadBalancer]
{{ range . -}}
[[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
{{ with nomadService "syslogng" -}}
[udp.routers]
[udp.routers.syslogudp]
entryPoints = ["syslogudp"]
service = "syslogngudp"
[udp.services]
[udp.services.syslogngudp]
[udp.services.syslogngudp.loadBalancer]
{{ range . -}}
[[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop"
}
template {
data = <<EOH
[http.middlewares]
{{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth]
usersFile = "/etc/traefik/usersfile"
{{- end }}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml"
change_mode = "noop"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
{{ .usersfile }}
{{- end }}
EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile"
change_mode = "noop"
}
resources {
cpu = 100
memory = 150
}
}
}
}


@ -1,23 +0,0 @@
resource "nomad_job" "traefik" {
jobspec = file("${path.module}/traefik.nomad")
}
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}


@ -1,11 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

74
databases/adminer.nomad Normal file

@ -0,0 +1,74 @@
job "adminer" {
datacenters = ["dc1"]
type = "service"
group "adminer" {
count = 1
# Some affinity to stateful hosts?
network {
mode = "bridge"
port "adminer" {
host_network = "loopback"
to = 8080
}
}
service {
name = "adminer"
port = "adminer"
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 4040
}
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 25
}
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.adminer.entryPoints=websecure",
]
}
task "adminer" {
driver = "docker"
config {
image = "adminer"
ports = ["adminer"]
}
env = {
"ADMINER_DEFAULT_SERVER" = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
}
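# NOMAD_UPSTREAM_ADDR_mysql_server resolves to the local bind address of the
# Connect upstream named "mysql-server" above (127.0.0.1:4040); dashes in the
# upstream name become underscores in the env var name.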
resources {
cpu = 50
memory = 50
}
}
}
}


@ -1,249 +0,0 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
priority = 80
group "lldap" {
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "tls" {}
}
service {
name = "lldap"
provider = "nomad"
port = "ldap"
}
service {
name = "lldap-tls"
provider = "nomad"
port = "tls"
}
service {
name = "ldap-admin"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
config {
image = "ghcr.io/lldap/lldap:v0.5"
ports = ["ldap", "web"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"]
}
env = {
"LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
}
template {
data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}}
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options]
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true
{{ end -}}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}"
port = {{ .port }}
tls_required = {{ .tls.Value | toLower }}
user = "{{ .user }}"
{{ end -}}
EOH
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart"
}
resources {
cpu = 10
memory = 200
memory_max = 200
}
}
task "bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/lldap" -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`
CHARACTER SET = 'utf8mb4'
COLLATE = 'utf8mb4_unicode_ci';
DROP USER IF EXISTS '{{ .db_user }}'@'%';
CREATE USER '{{ .db_user }}'@'%'
IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.*
TO '{{ .db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[ldap_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
}
}


@ -1,123 +0,0 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are services dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
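# The lldap job concatenates every psk under secrets/ldap/allowed_psks into
# stunnel's PSKsecrets file, which expects one "identity:secret" pair per line;
# this variable therefore renders as a line of the form "lldap:<32-char-random>".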
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}


@ -15,12 +15,10 @@ job "mysql-server" {
network {
mode = "bridge"
port "db" {
static = 3306
host_network = "loopback"
to = 3306
}
port "tls" {}
}
volume "mysql-data" {
@ -31,23 +29,44 @@ job "mysql-server" {
service {
name = "mysql-server"
provider = "nomad"
port = "db"
}
service {
name = "mysql-tls"
provider = "nomad"
port = "tls"
connect {
sidecar_service {
proxy {
local_service_port = 3306
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
# Can't use a tcp check with bridge network or proxy
# check {
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
}
task "mysql-server" {
driver = "docker"
config {
image = "mariadb:10"
image = "mysql:8"
ports = ["db"]
args = ["--innodb-buffer-pool-size=1G"]
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
volume_mount {
@ -63,56 +82,17 @@ job "mysql-server" {
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/mysql-server" }}
MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
{{ with secret "kv/data/mysql" }}
MYSQL_ROOT_PASSWORD={{ .Data.data.root_password }}
{{ end }}
EOH
destination = "${NOMAD_SECRETS_DIR}/db.env"
destination = "secrets/db.env"
env = true
}
resources {
cpu = 300
memory = 1536
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:3306
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/mysql/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
memory = 1024
}
}
}


@ -1,41 +1,53 @@
resource "nomad_job" "mysql-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
resource "nomad_job" "adminer" {
hcl2 {
enabled = true
}
}
EOH
job_acl {
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
jobspec = file("${path.module}/adminer.nomad")
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
# NOTE: This may need to be moved to after the services are created
resource "consul_config_entry" "mysql_intents" {
name = "mysql-server"
kind = "service-intentions"
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "adminer"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "nextcloud"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "backups"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
]
})
}


@ -1,9 +1,9 @@
job "postgres-server" {
job "postgres" {
datacenters = ["dc1"]
type = "service"
priority = 80
group "postgres-server" {
group "postgres" {
count = 1
restart {
@ -15,12 +15,10 @@ job "postgres-server" {
network {
mode = "bridge"
port "db" {
static = 5432
host_network = "loopback"
to = 5432
}
port "tls" {}
}
volume "postgres-data" {
@ -30,18 +28,33 @@ job "postgres-server" {
}
service {
name = "postgres-server"
provider = "nomad"
name = "postgres"
port = "db"
connect {
sidecar_service {
proxy {
local_service_port = 5432
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
# Can't use a tcp check with bridge network or proxy
# check {
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
}
service {
name = "postgres-tls"
provider = "nomad"
port = "tls"
}
task "postgres-server" {
task "postgres" {
driver = "docker"
config {
@ -49,6 +62,13 @@ job "postgres-server" {
ports = ["db"]
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
volume_mount {
volume = "postgres-data"
destination = "/var/lib/postgresql/data"
@ -56,15 +76,13 @@ job "postgres-server" {
}
env = {
# Allow connections from any host
"MYSQL_ROOT_HOST" = "%"
PGDATA = "/var/lib/postgresql/data/pgdata"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/postgres-server" }}
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_pass }}
{{ with secret "kv/data/postgres" }}
POSTGRES_PASSWORD={{ .Data.data.superuser_password }}
{{ end }}
EOH
destination = "secrets/db.env"
@ -72,48 +90,8 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
}
resources {
cpu = 500
memory = 700
memory_max = 1200
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[postgres_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:5432
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/postgres/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
cpu = 300
memory = 1024
}
}
}


@ -1,41 +1,39 @@
resource "nomad_job" "postgres-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
# NOTE: This may need to be moved to after the services are created
resource "consul_config_entry" "postgres_intents" {
name = "postgres-server"
kind = "service-intentions"
job_acl {
# job_id = resource.nomad_job.postgres-server.id
job_id = "postgres-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "postgres_postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_postgres_psk" {
path = "secrets/postgres/allowed_psks/postgres"
items = {
psk = "postgres:${resource.random_password.postgres_postgres_psk.result}"
}
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "adminer"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "backups"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "immich"
Precedence = 9
Type = "consul"
},
]
})
}


@ -1,7 +1,7 @@
job "redis-${name}" {
job "redis" {
datacenters = ["dc1"]
type = "service"
priority = 80
priority = 60
group "cache" {
count = 1
@ -15,13 +15,37 @@ job "redis-${name}" {
network {
mode = "bridge"
port "tls" {}
port "main" {
host_network = "loopback"
to = 6379
}
}
service {
name = "redis-${name}"
provider = "nomad"
port = "tls"
name = "redis"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 6379
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
# check {
# name = "alive"
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
}
task "redis" {
@ -29,54 +53,14 @@ job "redis-${name}" {
config {
image = "redis:6"
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "$${NOMAD_ALLOC_DIR}/data"]
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
ports = ["main"]
}
resources {
cpu = 100
memory = 64
memory_max = 512
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:latest"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 50
memory = 15
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[redis_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:6379
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/redis-${name}" -}}
{{ .allowed_psks }}
{{- end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
memory = 512
memory_max = 1024
}
}
}


@ -1,12 +1,46 @@
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
jobspec = file("${path.module}/redis.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "rediscommander" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/rediscommander.nomad")
}
resource "consul_config_entry" "redis_intents" {
name = "redis"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "blocky-api"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "rediscommander"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "authelia"
Precedence = 9
Type = "consul"
},
]
})
}


@ -0,0 +1,65 @@
job "rediscommander" {
datacenters = ["dc1"]
type = "service"
group "rediscommander" {
count = 1
network {
mode = "bridge"
port "main" {
host_network = "loopback"
to = 8081
}
}
service {
name = "rediscommander"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 8081
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 25
}
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.rediscommander.entryPoints=websecure",
]
}
task "rediscommander" {
driver = "docker"
config {
image = "rediscommander/redis-commander:latest"
ports = ["main"]
}
env = {
"REDIS_HOSTS" = "local:${NOMAD_UPSTREAM_ADDR_redis}"
}
resources {
cpu = 50
memory = 50
}
}
}
}


@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -6,9 +6,8 @@ job "ddclient" {
task "ddclient" {
driver = "docker"
config {
image = "ghcr.io/linuxserver/ddclient:v3.10.0-ls104"
image = "linuxserver/ddclient:3.9.1"
mount {
type = "bind"
@ -17,22 +16,30 @@ job "ddclient" {
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/ddclient" -}}
daemon=900
ssl=yes
use=web
web=api.myip.com
protocol=cloudflare,
zone={{ .zone }},
zone={{ key "ddclient/zone" }},
ttl=1,
login=token,
password={{ .domain_ddclient }}
{{ with secret "kv/data/cloudflare" -}}
login={{ .Data.data.api_user }},
password={{ .Data.data.api_key }}
# login=token,
# password={{ .Data.data.api_token_dns_edit_all }}
{{ end -}}
{{ .domain }}
{{- end }}
{{ key "ddclient/domain" }}
EOH
destination = "secrets/ddclient.conf"
change_mode = "restart"

144
hashi-up.sh Normal file

@ -0,0 +1,144 @@
#!/usr/bin/env bash
export VERIFY_CHECKSUM=0
export ALIAS_NAME=
export OWNER=jsiebens
export REPO=hashi-up
export SUCCESS_CMD="$REPO version"
export BINLOCATION="~/bin"
###############################
# Content common across repos #
###############################
version=$(curl -sI https://github.com/$OWNER/$REPO/releases/latest | grep -i location: | awk -F"/" '{ printf "%s", $NF }' | tr -d '\r')
if [ ! $version ]; then
echo "Failed while attempting to install $REPO. Please manually install:"
echo ""
echo "1. Open your web browser and go to https://github.com/$OWNER/$REPO/releases"
echo "2. Download the latest release for your platform. Call it '$REPO'."
echo "3. chmod +x ./$REPO"
echo "4. mv ./$REPO $BINLOCATION"
if [ -n "$ALIAS_NAME" ]; then
echo "5. ln -sf $BINLOCATION/$REPO /usr/local/bin/$ALIAS_NAME"
fi
exit 1
fi
getPackage() {
uname=$(uname)
userid=$(id -u)
suffix=""
case $uname in
"Darwin")
suffix="-darwin"
;;
"MINGW"*)
suffix=".exe"
BINLOCATION="$HOME/bin"
mkdir -p $BINLOCATION
;;
"Linux")
arch=$(uname -m)
case $arch in
"aarch64")
suffix="-arm64"
;;
esac
case $arch in
"armv6l" | "armv7l")
suffix="-armhf"
;;
esac
;;
esac
targetFile="/tmp/$REPO$suffix"
if [ "$userid" != "0" ]; then
targetFile="$(pwd)/$REPO$suffix"
fi
if [ -e "$targetFile" ]; then
rm "$targetFile"
fi
url=https://github.com/$OWNER/$REPO/releases/download/$version/$REPO$suffix
echo "Downloading package $url as $targetFile"
curl -sSL $url --output "$targetFile"
if [ "$?" = "0" ]; then
if [ "$VERIFY_CHECKSUM" = "1" ]; then
checkHash
fi
chmod +x "$targetFile"
echo "Download complete."
if [ ! -w "$BINLOCATION" ]; then
echo
echo "============================================================"
echo " The script was run as a user who is unable to write"
echo " to $BINLOCATION. To complete the installation the"
echo " following commands may need to be run manually."
echo "============================================================"
echo
echo " sudo cp $REPO$suffix $BINLOCATION/$REPO"
if [ -n "$ALIAS_NAME" ]; then
echo " sudo ln -sf $BINLOCATION/$REPO $BINLOCATION/$ALIAS_NAME"
fi
echo
else
echo
echo "Running with sufficient permissions to attempt to move $REPO to $BINLOCATION"
if [ ! -w "$BINLOCATION/$REPO" ] && [ -f "$BINLOCATION/$REPO" ]; then
echo
echo "================================================================"
echo " $BINLOCATION/$REPO already exists and is not writeable"
echo " by the current user. Please adjust the binary ownership"
echo " or run sh/bash with sudo."
echo "================================================================"
echo
exit 1
fi
mv "$targetFile" $BINLOCATION/$REPO
if [ "$?" = "0" ]; then
echo "New version of $REPO installed to $BINLOCATION"
fi
if [ -e "$targetFile" ]; then
rm "$targetFile"
fi
if [ $(which $ALIAS_NAME) ]; then
echo "There is already a command '$ALIAS_NAME' in the path, NOT creating alias"
else
if [ -n "$ALIAS_NAME" ]; then
if [ ! -L $BINLOCATION/$ALIAS_NAME ]; then
ln -s $BINLOCATION/$REPO $BINLOCATION/$ALIAS_NAME
echo "Creating alias '$ALIAS_NAME' for '$REPO'."
fi
fi
fi
${SUCCESS_CMD}
fi
fi
}
getPackage
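# Sketch of expected usage (assumes ~/bin exists and is on PATH):
#   bash hashi-up.sh     # fetches the latest release of hashi-up into ~/bin
#   hashi-up version     # the SUCCESS_CMD the script runs after a successful install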

344
immich.nomad Normal file

@ -0,0 +1,344 @@
variable "postgres_image" {
type = string
default = "postgres:14"
}
variable "immich_tag" {
type = string
default = "release"
}
job "immich" {
datacenters = ["dc1"]
type = "service"
group "immich" {
count = 1
network {
mode = "bridge"
port "server" {
host_network = "loopback"
to = 3001
}
port "microservices" {
host_network = "loopback"
to = 3001
}
port "web" {
host_network = "loopback"
to = 3000
}
port "proxy" {
host_network = "loopback"
to = 80
}
}
volume "immich-upload" {
type = "host"
read_only = false
source = "immich-upload"
}
service {
name = "immich"
port = "proxy"
connect {
sidecar_service {
proxy {
local_service_port = 80
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
tags = [
"traefik.enable=true",
]
}
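# One-shot bootstrap task: renders bootstrap.sql from Vault secrets and runs it with psql
# against the postgres upstream, creating the immich role and database only if they do not
# already exist.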
task "immich-bootstrap" {
driver = "docker"
config {
image = "${var.postgres_image}"
args = [
"/bin/bash",
"-c",
"/usr/bin/psql --no-password -f ${NOMAD_SECRETS_DIR}/bootstrap.sql",
]
}
resources {
cpu = 50
memory = 20
memory_max = 100
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
env {
PGHOST = "${NOMAD_UPSTREAM_IP_postgres}"
PGPORT = "${NOMAD_UPSTREAM_PORT_postgres}"
PGUSER = "root"
}
template {
data = <<EOH
{{ with secret "kv/data/postgres" }}
PGPASSWORD={{ .Data.data.superuser_password }}
{{ end }}
EOH
destination = "secrets/pgpass.env"
env = true
}
template {
data = <<EOF
{{ with secret "kv/data/immich" }}
DB_DATABASE_NAME={{ .Data.data.db_name }}
DB_USERNAME={{ .Data.data.db_user }}
DB_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/immich-db.env"
env = true
}
template {
data = <<EOH
{{ with secret "kv/data/immich" }}
DO
$do$
BEGIN
IF EXISTS (
SELECT FROM pg_catalog.pg_roles
WHERE rolname = '{{ .Data.data.db_user }}') THEN
RAISE NOTICE 'Role "{{ .Data.data.db_user }}" already exists. Skipping.';
ELSE
CREATE ROLE {{ .Data.data.db_user }} LOGIN PASSWORD '{{ .Data.data.db_pass }}';
END IF;
IF EXISTS (SELECT FROM pg_database WHERE datname = '{{ .Data.data.db_name }}') THEN
RAISE NOTICE 'Database already exists'; -- optional
ELSE
PERFORM dblink_exec('dbname=' || current_database() -- current db
, 'CREATE DATABASE {{ .Data.data.db_name }}');
REVOKE ALL ON DATABASE {{ .Data.data.db_name }} FROM public;
GRANT ALL PRIVILEGES ON DATABASE {{ .Data.data.db_name }} TO {{ .Data.data.db_user }};
END IF;
END
$do$;
{{ end }}
EOH
destination = "secrets/bootstrap.sql"
}
}
task "immich-server" {
driver = "docker"
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
read_only = false
}
config {
image = "altran1502/immich-server:${var.immich_tag}"
entrypoint = ["/bin/sh", "./start-server.sh"]
ports = ["server"]
}
resources {
cpu = 100
memory = 200
}
env {
NODE_ENV = "production"
REDIS_HOSTNAME = "${NOMAD_UPSTREAM_IP_redis}"
REDIS_PORT = "${NOMAD_UPSTREAM_PORT_redis}"
# REDIS_DBINDEX=0
# REDIS_PASSWORD=
# REDIS_SOCKET=
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
DB_HOSTNAME="${NOMAD_UPSTREAM_IP_postgres}"
DB_PORT="${NOMAD_UPSTREAM_PORT_postgres}"
{{ with secret "kv/data/immich" }}
DB_DATABASE_NAME={{ .Data.data.db_name }}
DB_USERNAME={{ .Data.data.db_user }}
DB_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
}
task "immich-microservices" {
driver = "docker"
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
read_only = false
}
config {
image = "altran1502/immich-server:${var.immich_tag}"
entrypoint = ["/bin/sh", "./start-microservices.sh"]
ports = ["microservices"]
}
resources {
cpu = 100
memory = 50
memory_max = 200
}
env {
NODE_ENV = "production"
REDIS_HOSTNAME = "${NOMAD_UPSTREAM_IP_redis}"
REDIS_PORT = "${NOMAD_UPSTREAM_PORT_redis}"
# REDIS_DBINDEX=0
# REDIS_PASSWORD=
# REDIS_SOCKET=
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
DB_HOSTNAME="${NOMAD_UPSTREAM_IP_postgres}"
DB_PORT="${NOMAD_UPSTREAM_PORT_postgres}"
{{ with secret "kv/data/immich" }}
DB_DATABASE_NAME={{ .Data.data.db_name }}
DB_USERNAME={{ .Data.data.db_user }}
DB_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
}
task "immich-machine-learning" {
driver = "docker"
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
read_only = false
}
config {
image = "altran1502/immich-machine-learning:${var.immich_tag}"
entrypoint = ["/bin/sh", "./entrypoint.sh"]
}
resources {
cpu = 500
memory = 100
memory_max = 500
}
env {
NODE_ENV = "production"
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
DB_HOSTNAME="${NOMAD_UPSTREAM_IP_postgres}"
DB_PORT="${NOMAD_UPSTREAM_PORT_postgres}"
{{ with secret "kv/data/immich" }}
DB_DATABASE_NAME={{ .Data.data.db_name }}
DB_USERNAME={{ .Data.data.db_user }}
DB_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
}
task "immich-web" {
driver = "docker"
config {
image = "altran1502/immich-web:${var.immich_tag}"
entrypoint = ["/bin/sh", "./entrypoint.sh"]
ports = ["web"]
}
resources {
cpu = 50
memory = 50
}
}
task "immich-proxy" {
driver = "docker"
config {
ports = ["proxy"]
image = "altran1502/immich-proxy:${var.immich_tag}"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:BKQ5f5ijzeyBSnUr+j0wUi+bYv6KBQVQNDXNRVEcfJE=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:oWV3VXZhqPZ8Ia07nlIZLeXDBqVULMg9lP3dVMczDCo=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}

7
levant/README.md Normal file
View File

@ -0,0 +1,7 @@
# Terraform Levant
This module renders a Levant template and then creates a Nomad job from the rendered output.
It covers only a subset of Levant's capabilities, since much of the rest can already be done directly with Terraform.
required:
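Judging from main.tf below, template_path is the only input without a default. A minimal usage sketch follows; the module source path, template file, and values are illustrative assumptions rather than something taken from this repo:

module "my_service" {
  source        = "./levant"
  template_path = "${path.module}/templates/my-service.nomad"

  # Optional: lets levant resolve consulKey lookups while rendering
  consul_address = "127.0.0.1:8500"

  # Plain string values; the module JSON-encodes them before calling levant.py
  variables = {
    datacenter = "dc1"
  }
}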

63
levant/levant.py Executable file
View File

@ -0,0 +1,63 @@
#! /usr/bin/env python3
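# Invoked by Terraform's "external" data source (see main.tf): the query arrives as a
# JSON object on stdin, and the script must print a JSON object of string values to stdout.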
import json
import sys
from subprocess import check_output
from typing import Optional
from typing import overload
from typing import TypeVar
T = TypeVar("T")
@overload
def get_json(d: dict[str, str], key: str, default: None = None) -> None:
...
@overload
def get_json(d: dict[str, str], key: str, default: T = None) -> T:
...
def get_json(d: dict[str, str], key: str, default: Optional[T] = None) -> Optional[T]:
if key not in d:
return default
return json.loads(d[key])
query = json.load(sys.stdin)
# Required
template_path = query["template_path"]
# Optional
consul_address = query.get("consul_address")
if consul_address is not None:
consul_address = f"-consul-address={consul_address}"
# Terraform's external data source passes query values as strings, so complex inputs arrive JSON-encoded and must be decoded here
variables = [
f'--var={key}={value}' for key, value in get_json(query, "variables", {}).items()
]
variable_files = [
f'--var-file={value}' for value in get_json(query, "var_files", [])
]
args: list[str] = list(
filter(
None,
["levant", "render", consul_address]
+ variables
+ variable_files
+ [template_path],
)
)
# print(" ".join(args), file=sys.stderr)
# exit(1)
template = check_output(args, stderr=sys.stderr)
print(json.dumps({"template": template.decode()}))

37
levant/main.tf Normal file
View File

@ -0,0 +1,37 @@
variable "template_path" {
type = string
nullable = false
}
variable "consul_address" {
type = string
default = null
nullable = true
description = "Consul host and port for making KeyValue lookups"
}
variable "variables" {
type = map(string)
description = "Variables to be passed into nomad-pack with values in JSON form"
default = {}
}
variable "var_files" {
type = list(string)
description = "HCL files containing variables to be used by nomad-pack"
default = []
}
data "external" "levant" {
program = ["${path.module}/levant.py"]
query = {
template_path = var.template_path
consul_address = var.consul_address
variables = jsonencode(var.variables)
var_files = jsonencode(var.var_files)
}
}
resource "nomad_job" "levant" {
jobspec = data.external.levant.result.template
}

2
levant/test.nomad Normal file
View File

@ -0,0 +1,2 @@
job {
}

Some files were not shown because too many files have changed in this diff.