Compare commits

2 Commits

7658cfd0fe  WIP: Set up step-ca  (2022-03-22 16:39:39 -07:00)

    Unsure of the best way to set up bootstrapping the system. Do I run an
    Ansible playbook to generate certificates offline and then bootstrap
    with that? Can I bring it online after and schedule it with Nomad?

f2ce718b33  Add Nomad ACL bootstrap  (2022-03-21 21:26:04 -07:00)

172 changed files with 2423 additions and 17662 deletions
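The question in the WIP commit message suggests one workable shape: generate the CA offline with the smallstep CLI, copy the resulting state onto the step-ca-data host volume (declared for the ca_servers host below), then run step-ca as a Nomad job against that volume. A minimal sketch, assuming the `step` CLI; the names, paths, and job file are hypothetical and not taken from this repo:

    # Offline: create root/intermediate certs and the CA config (example values)
    step ca init \
        --name "Homelab CA" \
        --dns ca.thefij \
        --address ":8443" \
        --provisioner admin \
        --password-file ./ca-password.txt

    # Then copy the $(step path) tree to /srv/volumes/step-ca-data on the
    # ca_servers host (e.g. via an Ansible copy task) and schedule the server:
    nomad job run step-ca.nomad   # hypothetical jobspec mounting step-ca-data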

.gitignore (vendored, 51 lines changed)

@@ -1,53 +1,4 @@
+roles/
-# ---> Terraform
-# Local .terraform directories
-**/.terraform/*
-# .tfstate files
-*.tfstate
-*.tfstate.*
-# Crash log files
-crash.log
-crash.*.log
-# Exclude all .tfvars files, which are likely to contain sensitive data, such as
-# password, private keys, and other secrets. These should not be part of version
-# control as they are data points which are potentially sensitive and subject
-# to change depending on the environment.
-#
-*.tfvars
-# Ignore override files as they are usually used to override resources locally and so
-# are not checked in
-override.tf
-override.tf.json
-*_override.tf
-*_override.tf.json
-# Include override files you do wish to add to version control using negated pattern
-#
-# !example_override.tf
-# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
-# example: *tfplan*
-# Ignore CLI configuration files
-.terraformrc
-terraform.rc
-# ---> Ansible
-*.retry
-ansible_galaxy/ansible_collections/
-ansible_galaxy/roles/
-# Repo specific
 venv/
-ca/
-# Non-public bootstrap values
 vault-keys.json
 nomad_bootstrap.json
-consul_values.yml
-vault_hashi_vault_values.yml
-vault_*.yml
-ansible_playbooks/vars/nomad_vars.yml

@@ -1,34 +0,0 @@
---
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.76.0
hooks:
- id: terraform_fmt
- id: terraform_validate
args:
- --tf-init-args=-lockfile=readonly
- id: terraform_tflint
args:
- --args=--config=__GIT_WORKING_DIR__/.tflint.hcl
- id: terraform_tfsec
# - id: terraform_providers_lock
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: check-added-large-files
- id: check-merge-conflict
- id: end-of-file-fixer
exclude: "^ansible_playbooks/vars/nomad_vars.sample.yml$"
- id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets
rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets-baseline']
- repo: local
hooks:
- id: variable-sample
name: generate variable sample file
language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file]

@@ -1,191 +0,0 @@
{
"version": "1.4.0",
"plugins_used": [
{
"name": "ArtifactoryDetector"
},
{
"name": "AWSKeyDetector"
},
{
"name": "AzureStorageKeyDetector"
},
{
"name": "Base64HighEntropyString",
"limit": 4.5
},
{
"name": "BasicAuthDetector"
},
{
"name": "CloudantDetector"
},
{
"name": "DiscordBotTokenDetector"
},
{
"name": "GitHubTokenDetector"
},
{
"name": "HexHighEntropyString",
"limit": 3.0
},
{
"name": "IbmCloudIamDetector"
},
{
"name": "IbmCosHmacDetector"
},
{
"name": "JwtTokenDetector"
},
{
"name": "KeywordDetector",
"keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
{
"name": "NpmDetector"
},
{
"name": "PrivateKeyDetector"
},
{
"name": "SendGridDetector"
},
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
{
"name": "SquareOAuthDetector"
},
{
"name": "StripeDetector"
},
{
"name": "TwilioKeyDetector"
}
],
"filters_used": [
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_baseline_file",
"filename": ".secrets-baseline"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
},
{
"path": "detect_secrets.filters.heuristic.is_indirect_reference"
},
{
"path": "detect_secrets.filters.heuristic.is_likely_id_string"
},
{
"path": "detect_secrets.filters.heuristic.is_lock_file"
},
{
"path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
},
{
"path": "detect_secrets.filters.heuristic.is_potential_uuid"
},
{
"path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
},
{
"path": "detect_secrets.filters.heuristic.is_sequential_string"
},
{
"path": "detect_secrets.filters.heuristic.is_swagger_file"
},
{
"path": "detect_secrets.filters.heuristic.is_templated_secret"
},
{
"path": "detect_secrets.filters.regex.should_exclude_secret",
"pattern": [
"(\\${.*}|from_env|fake|!secret|VALUE)"
]
}
],
"results": {
"core/authelia.yml": [
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04",
"is_verified": false,
"line_number": 54,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false,
"line_number": 201,
"is_secret": false
}
],
"core/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252,
"is_secret": false
}
]
},
"generated_at": "2024-08-30T18:12:43Z"
}

.terraform.lock.hcl (generated, 76 lines changed)

@@ -1,40 +1,56 @@
 # This file is maintained automatically by "terraform init".
 # Manual edits may be lost in future updates.
 
-provider "registry.terraform.io/hashicorp/nomad" {
-  version = "2.2.0"
+provider "registry.terraform.io/hashicorp/consul" {
+  version = "2.14.0"
   hashes = [
-    "h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
-    "zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
-    "zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
-    "zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
-    "zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
-    "zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
-    "zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
-    "zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
-    "zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
-    "zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
-    "zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
+    "h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
+    "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
+    "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
+    "zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf",
+    "zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3",
+    "zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc",
+    "zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0",
+    "zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0",
+    "zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090",
+    "zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0",
+    "zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f",
+    "zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153",
   ]
 }
 
-provider "registry.terraform.io/hashicorp/random" {
-  version = "3.6.0"
+provider "registry.terraform.io/hashicorp/nomad" {
+  version = "1.4.16"
   hashes = [
-    "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
-    "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
-    "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
-    "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
-    "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
-    "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
-    "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
-    "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
-    "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
-    "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
-    "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
+    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
+    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
+    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
+    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
+    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
+    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
+    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
+    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
+    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
+    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
+    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
+    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
   ]
 }
+
+provider "registry.terraform.io/hashicorp/vault" {
+  version = "3.3.1"
+  hashes = [
+    "h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
+    "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
+    "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
+    "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
+    "zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282",
+    "zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce",
+    "zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb",
+    "zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929",
+    "zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891",
+    "zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861",
+    "zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c",
+    "zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941",
+  ]
+}

@@ -1,7 +0,0 @@
rule "terraform_required_version" {
enabled = false
}
rule "terraform_required_providers" {
enabled = false
}

Makefile (153 lines changed)

@@ -1,105 +1,80 @@
-SLEEP_FOR ?= 10
-VENV ?= venv
+SERVER ?= "192.168.2.41"
+SSH_USER = iamthefij
+SSH_KEY = ~/.ssh/id_ed25519
 
-.PHONY: sleep
-sleep:
-	sleep $(SLEEP_FOR)
+.PHONY: rm-nomad
+rm-nomad:
+	hashi-up nomad uninstall \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
 
-.PHONY: default
-default: check
+.PHONY: nomad-up
+nomad-up:
+	hashi-up nomad install \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
+		--server --client
+	hashi-up nomad start \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
 
-.PHONY: all
-all: cluster bootstrap-values apply
+.PHONY: rm-consul
+rm-consul:
+	hashi-up consul uninstall \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
 
+.PHONY: consul-up
+consul-up:
+	hashi-up consul install \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
+		--advertise-addr $(SERVER) \
+		--client-addr 0.0.0.0 \
+		--http-addr 0.0.0.0 \
+		--connect \
+		--server
+	hashi-up consul start \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
 
 .PHONY: cluster
-cluster: ansible-cluster
+cluster: consul-up nomad-up
 
-# Ensures virtualenv is present
-$(VENV):
-	python3 -m venv $(VENV)
-	$(VENV)/bin/pip install -r requirements.txt
+venv/bin/ansible:
+	python3 -m venv venv
+	./venv/bin/pip install ansible
+	./venv/bin/pip install python-consul
 
-# Installs pre-commit hooks
-.PHONY: install-hooks
-install-hooks: $(VENV)
-	$(VENV)/bin/pre-commit install --install-hooks
-
-# Checks files for encryption
-.PHONY: check
-check: $(VENV)
-	$(VENV)/bin/pre-commit run --all-files
-
-# Creates a new secrets baseline
-.secrets-baseline: $(VENV) Makefile
-	$(VENV)/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret|VALUE)' > .secrets-baseline
-
-# Audits secrets against baseline
-.PHONY: secrets-audit
-secrets-audit: $(VENV) .secrets-baseline
-	$(VENV)/bin/detect-secrets audit .secrets-baseline
-
-# Updates secrets baseline
-.PHONY: secrets-update
-secrets-update: $(VENV) .secrets-baseline
-	$(VENV)/bin/detect-secrets scan --baseline .secrets-baseline
-
-.PHONY: ansible_galaxy
-ansible_galaxy: ansible_galaxy/ansible_collections ansible_galaxy/roles
-
-ansible_galaxy/ansible_collections: $(VENV) ./ansible_galaxy/requirements.yml
-	$(VENV)/bin/ansible-galaxy collection install -p ./ansible_galaxy -r ./ansible_galaxy/requirements.yml
-
-ansible_galaxy/roles: $(VENV) ./ansible_galaxy/requirements.yml
-	$(VENV)/bin/ansible-galaxy install -p ./ansible_galaxy/roles -r ./ansible_galaxy/requirements.yml
-
 .PHONY: ansible-cluster
-ansible-cluster: $(VENV) ansible_galaxy
-	env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -K -vv \
-		$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-		./ansible_playbooks/setup-cluster.yml
+ansible-cluster: venv/bin/ansible
+	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
+	./venv/bin/ansible-playbook -K -vv \
+		-e "@vault-keys.json" \
+		-i ansible_hosts.yml -M ./roles ./setup-cluster.yml
 
-.PHONY: bootstrap-values
-bootstrap-values: $(VENV)
-	env NOMAD_ADDR=http://192.168.2.101:4646 \
-		NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \
-		$(VENV)/bin/python ./scripts/nomad_vars.py
-
-.PHONY: recover-nomad
-recover-nomad: $(VENV)
-	$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/recover-nomad.yaml
-
-.PHONY: stop-cluster
-stop-cluster: $(VENV)
-	$(VENV)/bin/ansible-playbook -K ./ansible_playbooks/stop-cluster.yml
-
-.PHONY: init
-init:
-	@terraform init
-
 .PHONY: plan
 plan:
-	@terraform plan \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
+	terraform plan
 
 .PHONY: apply
 apply:
-	@terraform apply \
-		-auto-approve \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
+	terraform apply
 
-.PHONY: refresh
-refresh:
-	@terraform refresh \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-
-.PHONY: destroy
-destroy:
-	@terraform destroy \
-		-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
-
-.PHONY: clean
-clean:
-	env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \
-		./ansible_playbooks/clear-data.yml
-	find -name "*.tfstate" -exec rm '{}' \;
-	rm -f ./vault-keys.json ./nomad_bootstrap.json
+# Install CNI on hosts?
+# curl -L -o cni-plugins.tgz "https://github.com/containernetworking/plugins/releases/download/v1.0.0/cni-plugins-linux-$( [ $(uname -m) = aarch64 ] && echo arm64 || echo amd64)"-v1.0.0.tgz
+# sudo mkdir -p /opt/cni/bin
+# sudo tar -C /opt/cni/bin -xzf cni-plugins.tgz
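The new hashi-up targets above take the sudo password from the environment rather than a file; a usage sketch (the password is a placeholder, 192.168.2.41 is the Makefile default, and the second address is a hypothetical alternate host):

    # Install and start Consul and Nomad on the target over SSH
    SSH_TARGET_SUDO_PASS='<sudo password>' make cluster

    # Point at a different host without editing the Makefile
    SERVER=192.168.2.42 SSH_TARGET_SUDO_PASS='<sudo password>' make consul-up nomad-up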

@@ -1,46 +0,0 @@
# Homelab Nomad

My configuration for creating my home Nomad cluster and deploying services to it.

This repo is not designed as a set of general-purpose templates; it is built to fit my specific needs. That said, I have made an effort for things to be as useful as possible for someone wanting to use or modify this.

## Running

    make all

## Design

Both Ansible and Terraform are used as part of this configuration. All hosts must be reachable over SSH prior to running any of this configuration.

To begin, Ansible runs a playbook to set up the cluster. This includes installing Nomad, bootstrapping the cluster and ACLs, setting up NFS shares, creating Nomad Host Volumes, and setting up Wesher as a WireGuard mesh between hosts.

After this is complete, Nomad variables must be set for services to access and configure themselves correctly. These are set based on the sample file.

Finally, the Terraform configuration can be applied, setting up all services deployed on the cluster.

The configuration of new services is intended to be as templated as possible and to avoid requiring changes in multiple places. For example, most services are configured with a template that provides reverse proxy, DNS records, database tunnels, database bootstrapping, metrics scraping, and authentication. The only real exception is backups, which requires a distinct job file, for now.

## What does it do?

* Nomad cluster for scheduling and configuring all services
* Blocky DNS servers with integrated ad blocking. This also provides service discovery
* Prometheus with autodiscovery of service metrics
* Loki and Promtail aggregating logs
* Minitor for service availability checks
* Grafana providing dashboards, alerting, and log searching
* Photoprism for photo management
* Remote and shared volumes over NFS
* Authelia for OIDC and proxy-based authentication with 2FA
* Sonarr and Lidarr for multimedia management
* Automated block-based backups using Restic

## Step by step

1. Update hosts in `ansible_playbooks/ansible_hosts.yml`
2. Update `ansible_playbooks/setup-cluster.yml`
   1. Update backup DNS server
   2. Update NFS shares from NAS
   3. Update volumes to make sure they are valid paths
3. Create `ansible_playbooks/vars/nomad_vars.yml` based on the sample file. TODO: This is quite specific and probably impossible without more documentation
4. Run `make all` (a sketch of what this expands to follows this file)
5. Update your network DNS settings to use the new servers' IP addresses
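For orientation, `make all` in the deleted Makefile runs `cluster`, `bootstrap-values`, and `apply` in order; reconstructed from the Makefile diff above, that is roughly this sequence (a sketch, not an exact transcript):

    # 1. cluster -> ansible-cluster: install galaxy deps, then run the setup playbook
    venv/bin/ansible-playbook -K -vv ./ansible_playbooks/setup-cluster.yml

    # 2. bootstrap-values: push Nomad variables using the bootstrap token
    NOMAD_ADDR=http://192.168.2.101:4646 \
    NOMAD_TOKEN=$(jq -r .SecretID nomad_bootstrap.json) \
        venv/bin/python ./scripts/nomad_vars.py

    # 3. apply: deploy all services with Terraform
    terraform apply -auto-approve \
        -var "nomad_secret_id=$(jq -r .SecretID nomad_bootstrap.json)"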

acls.tf (new file, 5 lines)

@@ -0,0 +1,5 @@
# resource "nomad_acl_policy" "create_post_bootstrap_policy" {
# name = "anonymous"
# description = "Anon RW"
# rules_hcl = file("${path.module}/acls/nomad-anon-bootstrap.hcl")
# }

@@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.20"
hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
]
}

@@ -1,12 +1,6 @@
 namespace "*" {
   policy = "write"
   capabilities = ["alloc-node-exec"]
-
-  variables {
-    path "*" {
-      capabilities = ["write", "read", "destroy"]
-    }
-  }
 }
 
 agent {

@@ -1,23 +0,0 @@
namespace "*" {
policy = "read"
}
agent {
policy = "read"
}
operator {
policy = "read"
}
quota {
policy = "read"
}
node {
policy = "read"
}
host_volume "*" {
policy = "read"
}

@@ -1,4 +0,0 @@
namespace "*" {
policy = "read"
capabilities = ["submit-job", "dispatch-job", "read-logs"]
}

@@ -1,18 +0,0 @@
resource "nomad_acl_policy" "anon_policy" {
name = "anonymous"
description = "Anon read only"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
}
resource "nomad_acl_policy" "admin" {
name = "admin"
description = "Admin RW for admins"
rules_hcl = file("${path.module}/nomad-admin-policy.hcl")
}
# TODO: (security) Limit this scope
resource "nomad_acl_policy" "deploy" {
name = "deploy"
description = "Write for job deployments"
rules_hcl = file("${path.module}/nomad-deploy-policy.hcl")
}

@@ -1,17 +0,0 @@
resource "nomad_acl_role" "admin" {
name = "admin"
description = "Nomad administrators"
policy {
name = nomad_acl_policy.admin.name
}
}
resource "nomad_acl_role" "deploy" {
name = "deploy"
description = "Authorized to conduct deployments and view logs"
policy {
name = nomad_acl_policy.deploy.name
}
}

@@ -1,6 +0,0 @@
# Configure the Nomad provider
provider "nomad" {
address = var.nomad_address
secret_id = var.nomad_secret_id
region = "global"
}

@@ -1,11 +0,0 @@
variable "nomad_secret_id" {
type = string
description = "Secret ID for ACL bootstrapped Nomad"
sensitive = true
default = ""
}
variable "nomad_address" {
type = string
default = "http://n1.thefij:4646"
}

@@ -1,7 +0,0 @@
[defaults]
inventory=ansible_playbooks/ansible_hosts.yml
collections_paths=ansible_galaxy
roles_path=ansible_galaxy/roles
[inventory]
enable_plugins=yaml

@@ -1,21 +0,0 @@
---
collections:
- name: community.hashi_vault
version: 3.0.0
roles:
- src: https://github.com/IamTheFij/ansible-consul.git
name: ansible-consul
scm: git
version: my-main
- src: https://github.com/IamTheFij/ansible-nomad.git
name: ansible-nomad
scm: git
version: my-main
- src: https://github.com/ansible-community/ansible-vault.git
name: ansible-vault
scm: git
version: master
# - src: maxhoesel.smallstep
# version: 0.4.10
- src: geerlingguy.docker
version: 4.2.2

ansible_hosts.yml (new file, 43 lines)

@@ -0,0 +1,43 @@
---
all:
children:
servers:
hosts:
nomad0.thefij:
# consul_node_role: bootstrap
nomad_node_role: both
nomad_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql-data
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: step-ca-data
path: /srv/volumes/step-ca-data
owner: "root"
group: "bin"
mode: "0700"
read_only: false
# consul_auto_encrypt:
# enabled: true
# dns_san: ["services.thefij"]
# ip_san: ["192.168.2.41", "127.0.0.1"]
# motionpi.thefij: {}
nomad1.thefij:
nomad_node_class: ingress
nomad_node_role: both
consul_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
ca_servers:
hosts:
nomad0.thefij:
step_path: /srv/volumes/step-ca-data
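Each nomad_host_volumes entry above should correspond to a host_volume stanza in the generated Nomad client configuration; a sketch of what the first entry would render to (assumed output of the ansible-nomad role, shown for orientation only):

    client {
      host_volume "mysql-data" {
        path      = "/srv/volumes/mysql-data"
        read_only = false
      }
    }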

@@ -1,72 +0,0 @@
---
all:
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
# n2.thefij:
# nomad_node_class: ingress
# nomad_reserved_memory: 1024
# nfs_mounts:
# - src: 10.50.250.2:/srv/volumes
# path: /srv/volumes/moxy
# opts: proto=tcp,rw
# nomad_unique_host_volumes:
# - name: nextcloud-data
# path: /srv/volumes/nextcloud
# owner: "root"
# group: "bin"
# mode: "0755"
# read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
nomad_instances:
vars:
nomad_network_interface: eth0
children:
nomad_servers: {}
nomad_clients: {}
nomad_servers:
hosts:
nonopi.thefij:
ansible_host: 192.168.2.170
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}
nomad_clients:
hosts:
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}

@@ -1,43 +0,0 @@
# Stops Nomad and clears all data from its data dirs
---
- name: Delete Nomad data
hosts: nomad_instances
tasks:
- name: Stop nomad
systemd:
name: nomad
state: stopped
become: true
- name: Kill nomad
shell:
cmd: systemctl kill nomad
become: true
- name: Stop all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
become: true
- name: Remove all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
become: true
- name: Unmount secrets
shell:
cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
become: true
- name: Remove data dir
file:
path: /var/nomad
state: absent
become: true
- name: Remove data dir
file:
path: /opt/nomad/data
state: absent
become: true

@@ -1,27 +0,0 @@
- name: Restart nomad and wesher
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Restart wesher
systemd:
name: wesher
state: restarted
become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad
systemd:
name: nomad
state: started
become: true

@@ -1,49 +0,0 @@
---
- name: Recover Nomad
hosts: nomad_servers
any_errors_fatal: true
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id
register: nomad_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ nomad_node_id.content | b64decode }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: /var/nomad/server/raft/peers.json
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].nomad_node_id.content | b64decode }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Nomad
systemd:
name: nomad
state: restarted
become: true

@@ -1,380 +0,0 @@
---
- name: Update DNS for bootstrapping with non-Nomad host
hosts: nomad_instances
become: true
gather_facts: false
vars:
non_nomad_dns: 192.168.2.170
tasks:
- name: Add non-nomad bootstrap DNS
lineinfile:
dest: /etc/resolv.conf
create: true
line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker
hosts: nomad_clients
become: true
vars:
docker_architecture_map:
x86_64: amd64
armv7l: armhf
aarch64: arm64
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
tasks:
- name: Remove snapd
package:
name: snapd
state: absent
# Not on Ubuntu 20.04
# - name: Install Podman
# hosts: nomad_instances
# become: true
#
# tasks:
# - name: Install Podman
# package:
# name: podman
# state: present
- name: Create NFS mounts
hosts: nomad_clients
become: true
vars:
shared_nfs_mounts:
- src: 192.168.2.10:/Media
path: /srv/volumes/media-read
opts: proto=tcp,port=2049,ro
- src: 192.168.2.10:/Media
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Overflow
path: /srv/volumes/nas-overflow
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Container
path: /srv/volumes/nas-container
opts: proto=tcp,port=2049,rw
tasks:
- name: Install nfs
package:
name: nfs-common
state: present
- name: Mount NFS volumes
ansible.posix.mount:
src: "{{ item.src }}"
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs4
loop: "{{ shared_nfs_mounts + (nfs_mounts | default([])) }}"
- import_playbook: wesher.yml
- name: Build Nomad cluster
hosts: nomad_instances
any_errors_fatal: true
become: true
vars:
shared_host_volumes:
- name: media-read
path: /srv/volumes/media-write
read_only: true
- name: media-write
path: /srv/volumes/media-write
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-overflow-write
path: /srv/volumes/nas-overflow/Media
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads
path: /srv/volumes/media-write/Downloads
read_only: false
- name: sabnzbd-config
path: /srv/volumes/media-write/Downloads/sabnzbd
read_only: false
- name: photoprism-media
path: /srv/volumes/photos/Photoprism
read_only: false
- name: photoprism-storage
path: /srv/volumes/nas-container/photoprism
read_only: false
- name: nzbget-config
path: /srv/volumes/nas-container/nzbget
read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config
path: /srv/volumes/nas-container/lidarr
read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: christmas-community
path: /srv/volumes/nas-container/christmas-community
read_only: false
- name: all-volumes
path: /srv/volumes
owner: "root"
group: "root"
mode: "0755"
read_only: false
roles:
- name: ansible-nomad
vars:
nomad_version: "1.9.3-1"
nomad_install_upgrade: true
nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to
nomad_bin_dir: /usr/bin
nomad_install_from_repo: true
nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
nomad_raft_protocol: 3
nomad_autopilot: true
nomad_encrypt_enable: true
# nomad_use_consul: true
# Metrics
nomad_telemetry: true
nomad_telemetry_prometheus_metrics: true
nomad_telemetry_publish_allocation_metrics: true
nomad_telemetry_publish_node_metrics: true
# Enable container plugins
nomad_cni_enable: true
nomad_cni_version: 1.0.1
nomad_docker_enable: true
nomad_docker_dmsetup: false
# nomad_podman_enable: true
# Merge shared host volumes with node volumes
nomad_host_volumes: "{{ shared_host_volumes + (nomad_unique_host_volumes | default([])) }}"
# Customize docker plugin
nomad_plugins:
docker:
config:
allow_privileged: true
gc:
image_delay: "24h"
volumes:
enabled: true
selinuxlabel: "z"
# Send logs to journald so we can scrape them for Loki
# logging:
# type: journald
extra_labels:
- "job_name"
- "job_id"
- "task_group_name"
- "task_name"
- "namespace"
- "node_name"
- "node_id"
# Bind nomad
nomad_bind_address: 0.0.0.0
# Default interface for binding tasks
# This is now set at the inventory level
# nomad_network_interface: eth0
# Create networks for binding task ports
nomad_host_networks:
- name: loopback
interface: lo
reserved_ports: "22"
- name: wesher
interface: wgoverlay
reserved_ports: "22"
# Enable ACLs
nomad_acl_enabled: true
nomad_config_custom:
ui:
enabled: true
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_servers
tasks:
- name: Start Nomad
systemd:
state: started
name: nomad
- name: Nomad API reachable?
uri:
url: "http://127.0.0.1:4646/v1/status/leader"
method: GET
status_code: 200
register: nomad_check_result
retries: 8
until: nomad_check_result is succeeded
delay: 15
changed_when: false
run_once: true
- name: Bootstrap ACLs
command:
argv:
- "nomad"
- "acl"
- "bootstrap"
- "-json"
run_once: true
ignore_errors: true
register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result
copy:
content: "{{ bootstrap_result.stdout }}"
dest: "../nomad_bootstrap.json"
when: bootstrap_result is succeeded
delegate_to: localhost
run_once: true
- name: Read secret
command:
argv:
- jq
- -r
- .SecretID
- ../nomad_bootstrap.json
delegate_to: localhost
run_once: true
no_log: true
changed_when: false
register: read_secretid
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
register: policies
run_once: true
changed_when: false
- name: Copy policy
copy:
src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true
register: anon_policy
- name: Create anon-policy
command:
argv:
- nomad
- acl
- policy
- apply
- -description=Anon read only
- anonymous
- /tmp/anonymous.policy.hcl
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
when: policies.stdout == "No policies found" or anon_policy.changed
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault
# community.general.terraform:
# project_path: ../acls
# force_init: true
# variables:
# consul_address: "{{ play_hosts[0] }}:8500"
# vault_token: "{{ root_token }}"
# nomad_secret_id: "{{ read_secretid.stdout }}"
# delegate_to: localhost
# run_once: true
# notify:
# - Restart Nomad
handlers:
- name: Restart Nomad
systemd:
state: restarted
name: nomad
retries: 6
delay: 5
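A note on the bootstrap flow above: `nomad acl bootstrap -json` emits a management token object, which the playbook saves verbatim as nomad_bootstrap.json; that is why the Makefile and the "Read secret" task can extract the token with `jq -r .SecretID`. The output is shaped roughly like this (values are placeholders):

    {
      "AccessorID": "c8e4a253-0000-0000-0000-000000000000",
      "SecretID": "53d07f83-0000-0000-0000-000000000000",
      "Name": "Bootstrap Token",
      "Type": "management",
      "Global": true
    }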

@@ -1,10 +0,0 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true

@@ -1,162 +0,0 @@
nomad/jobs:
base_hostname: VALUE
db_user_ro: VALUE
ldap_base_dn: VALUE
notify_email: VALUE
nomad/jobs/authelia:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
email_sender: VALUE
jwt_secret: VALUE
oidc_clients: VALUE
oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE
oidc_issuer_private_key: VALUE
session_secret: VALUE
storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/backup:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/blocky:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
mappings: VALUE
whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/ddclient:
domain: VALUE
domain_ddclient: VALUE
zone: VALUE
nomad/jobs/diun:
slack_hook_url: VALUE
nomad/jobs/git:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
secret_key: VALUE
smtp_sender: VALUE
nomad/jobs/grafana:
admin_pw: VALUE
alert_email_addresses: VALUE
db_name: VALUE
db_pass: VALUE
db_pass_ro: VALUE
db_user: VALUE
db_user_ro: VALUE
minio_access_key: VALUE
minio_secret_key: VALUE
oidc_secret: VALUE
slack_bot_token: VALUE
slack_bot_url: VALUE
slack_hook_url: VALUE
smtp_password: VALUE
smtp_user: VALUE
nomad/jobs/immich:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lego:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/lidarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/lldap:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
jwt_secret: VALUE
key_seed: VALUE
smtp_from: VALUE
smtp_reply_to: VALUE
nomad/jobs/minitor:
mailgun_api_key: VALUE
nomad/jobs/mysql-server:
mysql_root_password: VALUE
nomad/jobs/photoprism:
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
nomad/jobs/postgres-server:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/radarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia:
allowed_psks: VALUE
nomad/jobs/redis-blocky:
allowed_psks: VALUE
nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE
nomad/jobs/sonarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/traefik:
external: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/jobs/wishlist:
guest_password: VALUE
nomad/oidc:
secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql:
mysql_root_password: VALUE
secrets/postgres:
superuser: VALUE
superuser_pass: VALUE
secrets/smtp:
password: VALUE
port: VALUE
server: VALUE
tls: VALUE
user: VALUE
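Each path/key pair in this sample maps to a Nomad variable. Once real values exist, a single path can also be written by hand with the variables CLI instead of the nomad_vars.py script (a sketch; the values are placeholders):

    export NOMAD_TOKEN=$(jq -r .SecretID nomad_bootstrap.json)
    nomad var put nomad/jobs/authelia \
        db_name=authelia \
        db_user=authelia \
        db_pass='<generated password>'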

@@ -1,2 +0,0 @@
---
wesher_key: "{{ vault_wesher_key }}"

@@ -1,53 +0,0 @@
- name: Create overlay network
hosts: nomad_instances
become: true
vars_files:
- vars/wesher_vars.yml
- vars/vault_wesher_vars.yml
vars:
wesher_key: "{{ wesher_key }}"
wesher_version: v0.2.6
wesher_arch_map:
x86_64: amd64
armv7l: arm
aarch64: arm64
wesher_arch: "{{ wesher_arch_map[ansible_architecture] }}"
# wesher_sha256_map:
# x86_64: 8c551ca211d7809246444765b5552a8d1742420c64eff5677d1e27a34c72aeef
# armv7l: 97f5bbf2b00b8b11a4ca224540bf9c1affdb15432c3b6ad8da4c1a7b6175eb5d
# aarch64: 507c6397d67ea90bddb3e1c06ec9d8e38d4342ed6f0f6b47855fecc9f1d6fae0
# wesher_checksum: sha256:{{ wesher_sha256_map[ansible_architecture] }}
wesher_checksum: sha256:https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher.sha256sums
tasks:
- name: Download wesher
get_url:
url: https://github.com/costela/wesher/releases/download/{{ wesher_version }}/wesher-{{ wesher_arch }}
dest: /usr/local/sbin/wesher
checksum: "{{ wesher_checksum }}"
owner: root
mode: "0755"
- name: Install systemd unit
get_url:
url: https://github.com/costela/wesher/raw/{{ wesher_version }}/dist/wesher.service
dest: /etc/systemd/system/wesher.service
- name: Write wesher config
lineinfile:
path: /etc/default/wesher
create: true
regexp: "^{{ item.split('=')[0] }}"
line: "{{ item }}"
owner: root
mode: "0600"
loop:
- WESHER_CLUSTER_KEY={{ wesher_key }}
- WESHER_JOIN={% for host in ansible_play_hosts %}{{ hostvars[host].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %}
- name: Start wesher
systemd:
name: wesher.service
daemon_reload: true
state: started
enabled: true
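The lineinfile loop above renders /etc/default/wesher to something like the following for a two-host play (the key and the addresses are placeholders):

    WESHER_CLUSTER_KEY=<vault_wesher_key value>
    WESHER_JOIN=192.168.2.101,192.168.2.102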

@@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

@@ -1,252 +0,0 @@
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
datacenters = ["dc1"]
priority = 90
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
%{ endif ~}
%{ if batch_node != null ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
}
%{ endif ~}
group "backup" {
network {
mode = "bridge"
port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = false
source = "all-volumes"
}
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service {
name = "backup"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape"
]
}
task "backup" {
driver = "docker"
shutdown_delay = "5m"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = false
}
config {
image = "iamthefij/restic-scheduler:0.4.2"
ports = ["metrics"]
args = [
"--push-gateway",
"http://pushgateway.nomad:9091",
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
"--snapshot",
"$${NOMAD_META_snapshot}",
%{ endif ~}
"$${NOMAD_TASK_DIR}/node-jobs.hcl",
]
}
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/restic-scheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/restic-scheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/restic-scheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
action "backupall" {
command = "/bin/restic-scheduler"
args = ["-once", "-backup", "all", "/local/node-jobs.hcl"]
}
action "backupallenv" {
command = "sh"
args = ["-c", "/bin/restic-scheduler -once -backup all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
env = {
RCLONE_CHECKERS = "2"
RCLONE_TRANSFERS = "2"
RCLONE_FTP_CONCURRENCY = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
}
template {
data = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
{{ with nomadVar "secrets/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}}
{{ with nomadVar "secrets/postgres" }}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}}
EOF
destination = "secrets/db.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~}
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/${job_file}")}
{{ end -}}
{{ end -}}
%{ endfor ~}
# Dummy job to keep task healthy on node without any stateful services
job "Dummy" {
schedule = "@daily"
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
EOF
destination = "local/node-jobs.hcl"
}
resources {
cpu = 50
memory = 500
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
}
}
}
}
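Worth noting for anyone reading this jobspec: it is rendered twice, first by Terraform's templatefile and then by Nomad's runtime templating, so two escaping conventions are deliberately mixed. A minimal illustration of the convention used throughout the file:

    # Rendered by Terraform (templatefile) at plan time:
    #   ${batch_node}                  interpolates the templatefile variable
    #   %{ if batch_node != null }     directives gate the system vs. batch variants
    # Escaped past Terraform, left for Nomad to render at run time:
    #   $${NOMAD_META_task}            becomes ${NOMAD_META_task} in the submitted job
    #   {{ with nomadVar "secrets/mysql" }}   consul-template syntax, untouched by Terraform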

@@ -1,136 +0,0 @@
resource "nomad_job" "backup" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
use_wesher = var.use_wesher
})
}
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"])
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
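The PSK variables above are stored in stunnel's identity:key form, so the PSKsecrets files the backup job later writes into NOMAD_SECRETS_DIR come out as a single line like this (the key is a placeholder):

    backups:<32-character random password>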

@@ -1,54 +0,0 @@
job "authelia" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local authelia dir" {
pre_script {
on_backup = "mkdir -p /local/authelia"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "authelia"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/authelia/dump.sql"
}
}
backup {
paths = ["/local/authelia"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,57 +0,0 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,54 +0,0 @@
job "grafana" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local grafana dir" {
pre_script {
on_backup = "mkdir -p /local/grafana"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "grafana"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/grafana/dump.sql"
}
}
backup {
paths = ["/local/grafana"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,64 +0,0 @@
job "lidarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/lidarr"]
backup_opts {
Exclude = [
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,53 +0,0 @@
job "lldap" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local backup dir" {
pre_script {
on_backup = "mkdir -p /local/lldap"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
database = "lldap"
no_tablespaces = true
dump_to = "/local/lldap/dump.sql"
}
}
backup {
paths = ["/local/lldap"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,41 +0,0 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = [
# Configuration
"/data/nas-container/nzbget",
# Queued nzb files
"/data/media-write/Downloads/nzb",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,60 +0,0 @@
job "photoprism" {
schedule = "10 * * * *"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local photoprism dir" {
pre_script {
on_backup = "mkdir -p /local/photoprism"
}
}
task "Dump database" {
mysql "Dump database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "photoprism"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/photoprism/dump.sql"
}
}
backup {
paths = [
"/local/photoprism",
"/data/nas-container/photoprism",
]
backup_opts {
Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,64 +0,0 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,36 +0,0 @@
job "sabnzbd" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = ["/data/media-write/Downloads/sabnzbd"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

@@ -1,67 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

107
blocky/blocky.nomad Normal file
View File

@ -0,0 +1,107 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" {
datacenters = ["dc1"]
type = "service"
group "blocky" {
count = 1
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
to = "4000"
}
}
service {
name = "blocky-dns"
port = "dns"
}
service {
name = "blocky-api"
port = "api"
meta {
metrics_addr = "${NOMAD_ADDR_api}"
}
connect {
sidecar_service {
proxy {
local_service_port = 4000
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 4000
listener_port = "api"
}
}
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.blocky-api.entryPoints=websecure",
]
}
task "main" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky"
ports = ["dns", "api"]
mount {
type = "bind"
target = "/app/config.yml"
source = "app/config.yml"
}
}
resources {
cpu = 50
memory = 100
}
template {
data = var.config_data
destination = "app/config.yml"
}
}
}
}

28
blocky/blocky.tf Normal file
View File

@ -0,0 +1,28 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals {
config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = "${var.base_hostname}",
# Could get this from consul_service.traefik
# but not sure what happens if it doesn't exist yet
# (a possible lookup is sketched after this file)
"ingress_address" = "192.168.2.106",
}
)
}
resource "nomad_job" "blocky" {
hcl2 {
enabled = true
vars = {
"config_data" = "${local.config_data}",
}
}
jobspec = file("${path.module}/blocky.nomad")
}
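A minimal sketch of the consul_service lookup mentioned in the comment above, assuming the hashicorp/consul provider is configured and Traefik is already registered in Consul. If the service doesn't exist yet, the data source fails at plan time, which is exactly the open question the hardcoded address sidesteps.

data "consul_service" "traefik" {
  name = "traefik"
}

locals {
  # Hypothetical replacement for the hardcoded ingress_address above
  ingress_address_from_consul = data.consul_service.traefik.service[0].address
}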

46
blocky/config.yml Normal file
View File

@ -0,0 +1,46 @@
upstream:
default:
- 1.1.1.1
- 1.0.0.1
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://hosts-file.net/ad_servers.txt
smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
- https://perflyst.github.io/PiHoleBlocklist/regex.list
malware:
- https://mirror1.malwaredomains.com/files/justdomains
clientGroupsBlock:
default:
- ads
- malware
customDNS:
customTTL: 1h
mapping:
${base_hostname}: ${ingress_address}
prometheus:
enable: true
redis:
address: {{ env "NOMAD_UPSTREAM_ADDR_redis" }}
# password: passwd
# database: 2
required: true
# connectionAttempts: 10
# connectionCooldown: 3s
# queryLog:
# type: mysql
# target: db_user:db_password@tcp(db_host_or_ip:3306)/db_user?charset=utf8mb4&parseTime=True&loc=Local
# logRetentionDays: 7
port: 53
httpPort: 4000

1
ca/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
step_path

19
ca/Makefile Normal file
View File

@ -0,0 +1,19 @@
STEPPATH ?= ./step_path
.PHONY: bootstrap
bootstrap: $(STEPPATH)/config/ca.json
$(STEPPATH)/config/ca.json:
env STEPPATH=$(STEPPATH) \
step ca init \
--ssh \
--deployment-type standalone \
--name TheFij \
--dns ca.thefij.rocks \
--address 0.0.0.0:9443 \
--provisioner ian@iamthefij.com
.PHONY: run
run: $(STEPPATH)/config/ca.json
env STEPPATH=$(STEPPATH) \
step-ca $(STEPPATH)/config/ca.json

35
ca/setup-ca.yml Normal file
View File

@ -0,0 +1,35 @@
---
- name: Set up CA
hosts: ca_servers
become: true
tasks:
- name: Create step_path
file:
path: "{{ step_path }}"
state: directory
owner: root
mode: "0700"
- name: Install step-ca
include_role:
name: maxhoesel.smallstep.step_ca
vars:
step_ca_name: TheFij CA
step_ca_root_password: ...
step_ca_intermediate_password: ...
step_cli_steppath: "{{ step_path }}"
- name: Read fingerprint
command: "step-cli certificate fingerprint {{ step_path }}/certs/root_ca.crt"
register: root_ca_fp
- name: Bootstrap other hosts
hosts: servers
tasks:
- name: Bootstrap hosts to trust CA
include_role:
name: maxhoesel.smallstep.step_bootstrap_host
vars:
step_bootstrap_ca_url: http
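The playbook is truncated above. Purely as an illustration of how the bootstrap role is typically wired up (these lines are an assumption based on the role's documented variables and the root_ca_fp fact registered earlier, not the original file contents):

# Hypothetical completion, not the original file
step_bootstrap_ca_url: https://ca.thefij.rocks:9443
step_bootstrap_fingerprint: "{{ hostvars[groups['ca_servers'][0]]['root_ca_fp']['stdout'] }}"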

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.1"
hashes = [
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

View File

@ -1,204 +0,0 @@
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:4.38"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9999
service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
use_smtp = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
path "secrets/authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Enable oidc for nomad clients
module "nomad_oidc_client" {
source = "./oidc_client"
name = "nomad"
oidc_client_config = {
description = "Nomad"
authorization_policy = "two_factor"
redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
scopes = ["openid", "groups"]
}
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = module.nomad_oidc_client.client_id
oidc_client_secret = module.nomad_oidc_client.secret
bound_audiences = [module.nomad_oidc_client.client_id]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-admin\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}

View File

@ -1,278 +0,0 @@
theme: auto
# jwt_secret: <file>
{{ with nomadVar "nomad/jobs" }}
default_redirection_url: https://authelia.{{ .base_hostname }}/
{{ end }}
## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
## disabled. This setting must be a method that is enabled.
## Options are totp, webauthn, mobile_push.
default_2fa_method: ""
server:
host: 0.0.0.0
port: {{ env "NOMAD_PORT_main" }}
disable_healthcheck: false
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
format: json
telemetry:
metrics:
enabled: false
# address: '0.0.0.0:{{ env "NOMAD_PORT_metrics" }}'
totp:
disable: false
issuer: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
digits: 6
## The TOTP algorithm to use.
## It is CRITICAL you read the documentation before changing this option:
## https://www.authelia.com/c/totp#algorithm
algorithm: sha1
webauthn:
disable: false
timeout: 60s
display_name: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
user_verification: preferred
duo_api:
disable: true
# hostname:
# integration_key:
# secret_key:
# enable_self_enrollment: false
authentication_backend:
disable_reset_password: false
## Password Reset Options.
password_reset:
## External reset password url that redirects the user to an external reset portal. This disables the internal reset
## functionality.
# TODO: not sure if this is needed, probably not?
custom_url: ""
refresh_interval: 5m
ldap:
implementation: custom
# stunnel url
url: ldap://127.0.0.1:389
timeout: 5s
# TODO: Maybe use stunnel for this
start_tls: false
base_dn: {{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
additional_users_dn: ou=people
additional_groups_dn: ou=groups
username_attribute: uid
group_name_attribute: cn
mail_attribute: mail
display_name_attribute: displayName
# To allow sign in both with username and email, one can use a filter like
# (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
users_filter: "(&({username_attribute}={input})(objectClass=person))"
# Only supported filter by lldap right now
groups_filter: (member={dn})
## The username and password of the admin user.
{{ with nomadVar "secrets/ldap" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }}
# password set using secrets file
# password: <secret>
password_policy:
standard:
enabled: false
min_length: 8
max_length: 0
require_uppercase: true
require_lowercase: true
require_number: true
require_special: true
zxcvbn:
enabled: false
min_score: 3
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
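##
## Example rule (illustrative only, not part of this configuration): grant the
## 'family' group one-factor access to the API paths of a single host.
##
## - domain: 'media.example.com'
##   subject: 'group:family'
##   policy: one_factor
##   resources:
##     - '^/api/.*$'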
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 192.168.1.0/24
- 192.168.2.0/24
- 192.168.10.0/24
- name: VPN
networks:
- 192.168.5.0/24
rules:
## Allow favicons on internal network
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
resources:
- '^/apple-touch-icon-precomposed\.png$'
- '^/assets/safari-pinned-tab\.svg$'
- '^/apple-touch-icon-180x180\.png$'
- '^/apple-touch-icon\.png$'
- '^/favicon\.ico$'
networks:
- internal
policy: bypass
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks:
- internal
policy: one_factor
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
- domain:
# TODO: Drive these from Nomad variables
- 'secure.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
policy: two_factor
session:
## The name of the session cookie.
name: authelia_session
domain: {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Stored in a secrets file
# secret: <in file>
expiration: 1h
inactivity: 5m
remember_me_duration: 1M
redis:
host: 127.0.0.1
port: 6379
# username: authelia
# password: authelia
# database_index: 0
maximum_active_connections: 8
minimum_idle_connections: 0
regulation:
max_retries: 3
find_time: 2m
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
# encryption_key: <in file>
##
## MySQL / MariaDB (Storage Provider)
##
mysql:
host: 127.0.0.1
port: 3306
{{ with nomadVar "nomad/jobs/authelia" }}
database: {{ .db_name }}
username: {{ .db_user }}
# password: <in_file>
{{- end }}
timeout: 5s
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a Webauthn registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: true
{{ with nomadVar "secrets/smtp" }}
smtp:
host: {{ .server }}
port: {{ .port }}
username: {{ .user }}
# password: <in file>
{{- end }}
{{ with nomadVar "nomad/jobs/authelia" }}
sender: "{{ .email_sender }}"
## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
subject: "[Authelia] {title}"
## This address is used during the startup check to verify the email configuration is correct.
## It's not important what it is except if your email server only allows local delivery.
startup_check_address: test@iamthefij.com
{{- end }}
identity_providers:
oidc:
# hmac_secret: <file>
# issuer_private_key: <file>
clients:
{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
{{ "-" | indent 6 }}
{{ with nomadVar .Path }}
{{- $im := .ItemsMap -}}
{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
{{- end -}}
{{ $im | toYAML | indent 8 }}
{{ end }}
{{ end }}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

View File

@ -1,369 +0,0 @@
job "blocky" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
auto_revert = true
min_healthy_time = "60s"
healthy_deadline = "5m"
}
group "blocky" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF;
# a possible approach is sketched after the blocky.tf diff below)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network {
mode = "bridge"
port "dns" {
static = "53"
}
port "api" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = "4000"
}
dns {
# Set explicit DNS servers because tasks, by default, use this task for DNS
servers = [
"192.168.2.1",
]
}
}
service {
name = "blocky-dns"
provider = "nomad"
port = "dns"
}
service {
name = "blocky-api"
provider = "nomad"
port = "api"
tags = [
"prometheus.scrape",
"traefik.enable=true",
"traefik.http.routers.blocky-api.entryPoints=websecure",
]
check {
name = "api-health"
port = "api"
type = "http"
path = "/"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
task "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky:v0.24"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"]
}
action "refresh-lists" {
command = "/app/blocky"
args = ["lists", "refresh"]
}
action "healthcheck" {
command = "/app/blocky"
args = ["healthcheck"]
}
resources {
cpu = 50
memory = 75
memory_max = 150
}
template {
data = <<EOF
${file("${module_path}/config.yml")}
EOF
destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ range nomadServices }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") .Name -}}
{{ .Address }} {{ .Name }}.nomad
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .smarttv_regex.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .wemo.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/wemo.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .sonos.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/sonos.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 20
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[dns_server]
# Dummy server to keep stunnel running if no mysql is present
accept = 8053
connect = 127.0.0.1:53
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
connect = {{ .Address }}:{{ .Port }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/bin/bash",
"-c",
"/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" }}
password={{ .mysql_root_password }}
{{ end }}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/blocky" }}{{ if .db_name -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
{{ with nomadService "grafana" }}{{ with nomadVar "nomad/jobs" -}}
-- Grant grafana read_only user access to db
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
{{ end }}{{ end -}}
{{ else -}}
SELECT 'NOOP';
{{ end -}}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,88 +0,0 @@
resource "nomad_job" "blocky" {
jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher,
module_path = path.module,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
}
resource "nomad_variable" "blocky_lists_terraform" {
path = "blocky_lists/terraform"
items = {
smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
wemo = file("${path.module}/list-wemo.txt")
sonos = file("${path.module}/list-sonos.txt")
}
}
resource "nomad_acl_policy" "blocky_lists" {
name = "blocky-lists"
description = "Give access Blocky lists"
rules_hcl = <<EOH
namespace "default" {
variables {
path "blocky_lists/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "blocky"
}
}
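A minimal sketch of the TODO noted in blocky.nomad above: drive the group count from Terraform instead of hardcoding it. This assumes a server_count variable and a matching ${server_count} placeholder added to the jobspec; it is an illustration, not the repo's actual approach.

variable "server_count" {
  type        = number
  description = "Number of Nomad servers; drives the blocky group count"
  default     = 2
}

resource "nomad_job" "blocky" {
  jobspec = templatefile("${path.module}/blocky.nomad", {
    use_wesher  = var.use_wesher,
    module_path = path.module,
    # Assumes `count = ${server_count}` replaces the hardcoded count in blocky.nomad
    server_count = var.server_count,
  })
}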

View File

@ -1,142 +0,0 @@
ports:
dns: 53
http: 4000
# I must have IPv6 blocked or something, so force v4
connectIPVersion: v4
bootstrapDns:
- upstream: 1.1.1.1
- upstream: 1.0.0.1
- upstream: 9.9.9.9
- upstream: 149.112.112.112
upstreams:
init:
strategy: fast
groups:
default:
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one
# cloudflare:
# - 1.1.1.1
# - 1.0.0.1
# - 2606:4700:4700::1111
# - 2606:4700:4700::1001
# - https://one.one.one.one/dns-query
# - tcp-tls:one.one.one.one
# quad9:
# - 9.9.9.9
# - 149.112.112.112
# - 2620:fe::fe
# - 2620:fe::9
# - https://dns.quad9.net/dns-query
# - tcp-tls:dns.quad9.net
# quad9-secured:
# - 9.9.9.11
# - 149.112.112.11
# - 2620:fe::11
# - 2620:fe::fe:11
# - https://dns11.quad9.net/dns-query
# - tcp-tls:dns11.quad9.net
# quad9-unsecured:
# - 9.9.9.10
# - 149.112.112.10
# - 2620:fe::10
# - 2620:fe::fe:10
# - https://dns10.quad9.net/dns-query
# - tcp-tls:dns10.quad9.net
conditional:
fallbackUpstream: false
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
hostsFile:
sources:
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s
loading:
refreshPeriod: 30s
clientLookup:
upstream: 192.168.2.1
blocking:
blackLists:
ads:
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
# - https://hosts-file.net/ad_servers.txt
iot:
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
- {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt
- {{ env "NOMAD_TASK_DIR" }}/wemo.txt
- {{ env "NOMAD_TASK_DIR" }}/sonos.txt
antisocial:
- |
facebook.com
instagram.com
reddit.com
twitter.com
youtube.com
custom:
- {{ env "NOMAD_TASK_DIR" }}/block
whiteLists:
custom:
- {{ env "NOMAD_TASK_DIR" }}/allow
clientGroupsBlock:
default:
- ads
- custom
192.168.3.1/24:
- ads
- iot
- custom
customDNS:
customTTL: 1h
mapping:
{{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings.Value | indent 4 }}{{ end }}
# Catch all at top domain to traefik
{{ with nomadService "traefik" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
prometheus:
enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
redis:
address: 127.0.0.1:6379
# password: ""
# database: 0
connectionAttempts: 10
connectionCooldown: 3s
{{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}}
queryLog:
type: mysql
target: {{ .db_user }}:{{ .db_pass }}@tcp(127.0.0.1:3306)/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
logRetentionDays: 14
{{ end -}}
{{ end -}}

View File

@ -1,13 +0,0 @@
# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
/(^|\.)samsungrm\.net$/
# /(^|\.)samsungotn\.net$/ # prevents updates
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection

View File

@ -1,2 +0,0 @@
# Block Sonos devices from phoning home and allowing remote access
/(^|\.)sonos\.com$/

View File

@ -1,8 +0,0 @@
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com

View File

@ -1,5 +0,0 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,48 +0,0 @@
job "ddclient" {
datacenters = ["dc1"]
type = "service"
group "ddclient" {
task "ddclient" {
driver = "docker"
config {
image = "ghcr.io/linuxserver/ddclient:v3.10.0-ls104"
mount {
type = "bind"
source = "secrets/ddclient.conf"
target = "/config/ddclient.conf"
}
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/ddclient" -}}
daemon=900
ssl=yes
use=web
web=api.myip.com
protocol=cloudflare,
zone={{ .zone }},
ttl=1,
login=token,
password={{ .domain_ddclient }}
{{ .domain }}
{{- end }}
EOH
destination = "secrets/ddclient.conf"
change_mode = "restart"
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
}
}
}

View File

@ -1,143 +0,0 @@
job "exporters" {
datacenters = ["dc1"]
type = "service"
priority = 55
constraint {
distinct_hosts = true
}
group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network {
mode = "bridge"
port "promtail" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9080
}
}
service {
name = "promtail"
provider = "nomad"
port = "promtail"
meta {
nomad_dc = "$${NOMAD_DC}"
nomad_node_name = "$${node.unique.name}"
}
tags = [
"prometheus.scrape",
]
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:3.3.0"
args = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"]
ports = ["promtail"]
# Bind mount host machine-id and log directories
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
mount {
type = "bind"
source = "/var/log/journal/"
target = "/var/log/journal/"
readonly = true
}
mount {
type = "bind"
source = "/run/log/journal/"
target = "/run/log/journal/"
readonly = true
}
# mount {
# type = "bind"
# source = "/var/log/audit"
# target = "/var/log/audit"
# readonly = true
# }
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
- url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
{{- end }}
scrape_configs:
- job_name: journal
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: systemd-journal
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: unit
- source_labels: ['__journal__hostname']
target_label: hostname
- source_labels: ['__journal__transport']
target_label: journal_transport
# Docker log labels
- source_labels: ['__journal_syslog_identifier']
target_label: syslog_identifier
- source_labels: ['__journal_image_name']
target_label: docker_image_name
- source_labels: ['__journal_container_name']
target_label: docker_container_name
- source_labels: ['__journal_container_id']
target_label: docker_container_id
- source_labels: ['__journal_com_docker_compose_project']
target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service
- source_labels: ['__journal_com_hashicorp_nomad_alloc_id']
target_label: nomad_alloc_id
- source_labels: ['__journal_com_hashicorp_nomad_job_id']
target_label: nomad_job_id
- source_labels: ['__journal_com_hashicorp_nomad_job_name']
target_label: nomad_job_name
- source_labels: ['__journal_com_hashicorp_nomad_node_name']
target_label: nomad_node_name
- source_labels: ['__journal_com_hashicorp_nomad_group_name']
target_label: nomad_group_name
- source_labels: ['__journal_com_hashicorp_nomad_task_name']
target_label: nomad_task_name
EOF
destination = "$${NOMAD_TASK_DIR}/promtail.yml"
}
resources {
cpu = 50
memory = 100
}
}
}
}

View File

@ -1,5 +0,0 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}

View File

@ -1,300 +0,0 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
]
}
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
{{ if .db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`;
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.* to '{{ .db_user }}'@'%';
-- Create Read Only user
CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:10.0.10"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"]
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
}
template {
data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
{{ if .db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }}
{{ end -}}
SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
{{ with nomadVar "secrets/authelia/grafana" -}}
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
resources {
cpu = 100
memory = 200
}
}
task "grafana-reprovisioner" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/startup.sh"]
}
resources {
cpu = 50
memory = 50
}
action "reloadnow" {
command = "/local/reload_config.sh"
}
env = {
LOG_FILE = "/var/log/grafana_reloader.log"
}
template {
data = <<EOF
#! /bin/sh
apk add curl
touch "$LOG_FILE"
exec tail -f "$LOG_FILE"
EOF
perms = "777"
destination = "$${NOMAD_TASK_DIR}/startup.sh"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
{{ end -}}
EOF
env = true
destination = "$${NOMAD_SECRETS_DIR}/conf.env"
}
template {
data = <<EOF
#! /bin/sh
exec > "$LOG_FILE"
exec 2>&1
GRAFANA_URL=http://127.0.0.1:3000
echo "Reload dashboards"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/dashboards/reload
echo "Reload datasources"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/datasources/reload
echo "Reload plugins"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/plugins/reload
echo "Reload notifications"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/notifications/reload
echo "Reload access-control"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/access-control/reload
echo "Reload alerting"
curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/alerting/reload
EOF
change_mode = "noop"
perms = "777"
destination = "$${NOMAD_TASK_DIR}/reload_config.sh"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
destination = "$${NOMAD_ALLOC_DIR}/config/${config_file}"
perms = "777"
# Set owner to grafana uid
# uid = 472
# Change template delimiters for dashboard files that use JSON and contain double curly braces and square brackets
%{ if endswith(config_file, ".json") ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
change_mode = "script"
change_script {
command = "/local/reload_config.sh"
}
}
%{ endfor }
}
task "grafana-image-renderer" {
driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]
}
env = {
"RENDERING_MODE" = "clustered"
"RENDERING_CLUSTERING_MODE" = "browser"
"RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5
"RENDERING_CLUSTERING_TIMEOUT" = 30
}
}
}
}

View File

@ -1,117 +0,0 @@
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# resource "nomad_variable" "grafana_config" {
# for_each = fileset("${path.module}/grafana", "**")
#
# path = "nomad/jobs/grafana/${replace(each.key, ".", "_")}"
# items = {
# path = "${each.key}"
# value = file("${path.module}/grafana/${each.key}")
# left_delimiter = endswith(each.key, ".json") ? "<<<<" : "{{"
# right_delimiter = endswith(each.key, ".json") ? ">>>>" : "}}"
# }
# }

View File

@ -1,455 +0,0 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
data = /var/lib/grafana
# Directory where grafana can store logs
;logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains PROVISIONING config files that grafana will apply on startup and while running.
provisioning = from_env
#################################### Server ####################################
[server]
# Protocol (http, https, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
root_url = https://grafana.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as on string using the url properties.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
log_queries =
#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file
# Provider config options
# memory: not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions
# Session cookie name
;cookie_name = grafana_sess
# If you use session in https only, default is false
;cookie_secure = false
# Session life time, default is 86400
;session_life_time = 86400
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Background text for the user field on the login page
;login_hint = email or username
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
#################################### Github Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;team_ids =
;allowed_organizations =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
enabled = true
name = Authelia
;allow_sign_up = true
client_id = from_env
client_secret = from_env
scopes = openid profile email groups
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization
token_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/token
api_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/userinfo
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
# Role attribute path is not working
role_attribute_path = contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || contains(groups[*], 'developer') && 'Editor'
allow_assign_grafana_admin = true
skip_org_role_sync = true
use_pkce = true
;team_ids =
;allowed_organizations =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Auth Proxy ##########################
[auth.proxy]
{{ with nomadService "traefik" -}}
enabled = false
header_name = Remote-User
header_property = username
auto_sign_up = true
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
whitelist = {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
#################################### SMTP / Emailing ##########################
[smtp]
enabled = true
host = smtp.mailgun.org:587
user = from_env
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
password = from_env
;cert_file =
;key_file =
;skip_verify = false
from_address = grafana@iamthefij.com
from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
[emails]
;welcome_email_on_sign_up = false
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotation (controls the following options), default is true
;log_rotate = true
# Max number of lines in a single file, default is 1000000
;max_lines = 1000000
# Segment log daily, default is true
;daily_rotate = true
# Number of days to keep log files (deleted after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = false
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
enabled = true
# Publish interval
;interval_seconds = 10
# Send internal metrics to Graphite
[metrics.graphite]
# Enabled by setting an address (ex: localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Distributed tracing ############
[tracing.jaeger]
# Enabled by setting the address to send traces to Jaeger (ex: localhost:6831)
;address = localhost:6831
# Tag that will always be included when creating new spans. ex: (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
#################################### Grafana.com integration ##########################
# URL used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# You can choose between: s3, webdav, gcs, azure_blob, local
provider = s3
[external_image_storage.s3]
endpoint = https://minio.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
bucket = grafana-images
region = us-east-1
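# MinIO does not serve virtual-hosted bucket URLs by default, so force path-style access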
path_style_access = true
[external_image_storage.local]
# does not require any configuration
[rendering]
# Since they are inside the same group, we can reference their bound ports
server_url = http://127.0.0.1:8081/render
callback_url = http://127.0.0.1:3000/

View File

@ -1,882 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "Query report for blocky (MySQL)",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 14980,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 0
},
"id": 14,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.1.2",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.response_type, t.request_Ts as time, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.response_type\n order by t.request_Ts",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by response type",
"transformations": [],
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"decimals": 0,
"mappings": [],
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 0
},
"id": 16,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT t.request_ts AS time,\n case when t.reason like 'BLOCKED%' then SUBSTRING_INDEX(SUBSTRING_INDEX(t.reason,'(',-1), ')',1) else '' end AS metric,\n count(t.reason) AS cnt\nFROM log_entries t\nWHERE t.response_type ='BLOCKED'\n AND $__timeFilter(t.request_Ts)\n AND t.client_name in ($client_name)\n AND (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 2\nORDER BY time",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Blocked by Blacklist",
"type": "piechart"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_1",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 13,
"links": [],
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": true
},
"showUnfilled": true,
"text": {}
},
"pluginVersion": "9.2.4",
"repeatDirection": "v",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "f",
"rawQuery": true,
"rawSql": "SELECT t.request_Ts as time, t.client_name as metric, count(*) as cnt from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\n group by t.client_name\n order by 3 desc",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query count by client",
"transformations": [],
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Top 20 effective top level domain plus one more label",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 11,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.effective_tldp as metric, count(*) as value from log_entries t \nWHERE $__timeFilter(t.request_Ts) \n and t.response_type in ($response_type) \n and t.client_name in ($client_name) \n and (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.effective_tldp order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 effective TLD+1",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"displayName": "$__cell_0",
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 8,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": true
},
"showUnfilled": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"hide": false,
"metricColumn": "question_name",
"rawQuery": true,
"rawSql": "SELECT t.question_name as metric, count(*) as value from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n group by t.question_name order by count(*) desc limit 20",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"where": []
}
],
"title": "Top 20 queried domains",
"type": "bargauge"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 16
},
"id": 12,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n $__timeGroupAlias(t.request_Ts, '30m'),\n t.client_name as metric,\n count(*) as c\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nGROUP BY 1,2\nORDER BY 1",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Queries number per client (30m)",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "dtdurationms"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 23
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "time_series",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT\n UNIX_TIMESTAMP(t.request_Ts) as time,\n t.duration_ms\nFROM log_entries t\nWHERE\n $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0)\nORDER BY request_ts",
"refId": "A",
"select": [
[
{
"params": [
"duration_ms"
],
"type": "column"
}
]
],
"table": "log_entries",
"timeColumn": "request_ts",
"timeColumnType": "timestamp",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Query duration",
"type": "timeseries"
},
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"description": "Last 100 queries, newest on top",
"fieldConfig": {
"defaults": {
"custom": {
"displayMode": "auto",
"filterable": false,
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "time"
},
"properties": [
{
"id": "unit",
"value": "dateTimeAsIsoNoDateIfToday"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 31
},
"id": 4,
"options": {
"footer": {
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"format": "table",
"group": [],
"metricColumn": "none",
"rawQuery": true,
"rawSql": "SELECT UNIX_TIMESTAMP(t.request_Ts) as \"time\", \n t.client_ip as \"client IP\", \n t.client_name as \"client name\", \n t.duration_ms as \"duration in ms\", \n t.response_type as \"response type\", \n t.question_type as \"question type\", \n t.question_name as \"question name\", \n t.effective_tldp as \"effective TLD+1\", \n t.answer as \"answer\" from log_entries t \n WHERE $__timeFilter(t.request_Ts) and \n t.response_type in ($response_type) and \n t.client_name in ($client_name) and \n (length('$question') = 0 or INSTR(t.question_name, lower('$question')) > 0) \n order by t.request_Ts desc limit 100",
"refId": "A",
"select": [
[
{
"params": [
"value"
],
"type": "column"
}
]
],
"timeColumn": "time",
"where": [
{
"name": "$__timeFilter",
"params": [],
"type": "macro"
}
]
}
],
"title": "Last queries",
"type": "table"
}
],
"refresh": "",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": "",
"current": {
"selected": false,
"text": "All",
"value": "$__all"
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct client_name from log_entries",
"hide": 0,
"includeAll": true,
"label": "Client name",
"multi": true,
"name": "client_name",
"options": [],
"query": "select distinct client_name from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "mysql",
"uid": "DN2DNsD4z"
},
"definition": "select distinct response_type from log_entries",
"hide": 0,
"includeAll": true,
"label": "Response type",
"multi": true,
"name": "response_type",
"options": [],
"query": "select distinct response_type from log_entries",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"hide": 0,
"label": "Domain (contains)",
"name": "question",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Blocky query",
"uid": "AVmWSVWgz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large

View File

@ -1,411 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"fixedColor": "rgb(31, 120, 193)",
"mode": "fixed"
},
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#d44a3a",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 0
},
{
"color": "#299c46",
"value": 1
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 0,
"y": 0
},
"id": 2,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "none",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)",
"format": "time_series",
"instant": true,
"intervalFactor": 2,
"refId": "A"
}
],
"title": "Total Monitors",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"decimals": 0,
"mappings": [
{
"options": {
"match": "null",
"result": {
"text": "N/A"
}
},
"type": "special"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#299c46",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 1
},
{
"color": "#d44a3a",
"value": 2
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 4,
"x": 4,
"y": 0
},
"id": 6,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "count(minitor_monitor_up_count)-count(minitor_monitor_up_count>=1)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"refId": "A"
}
],
"title": "Total Down Services",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"fillOpacity": 70,
"lineWidth": 0,
"spanNulls": 900000
},
"mappings": [
{
"options": {
"0": {
"index": 1,
"text": "Down"
},
"1": {
"index": 0,
"text": "Up"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "red",
"value": 0
},
{
"color": "green",
"value": 1
}
]
},
"unit": "bool_on_off"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 14,
"x": 8,
"y": 0
},
"id": 4,
"links": [],
"options": {
"alignValue": "left",
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"mergeValues": true,
"rowHeight": 0.9,
"showValue": "never",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.2.4",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"editorMode": "code",
"exemplar": false,
"expr": "sum(minitor_monitor_up_count) by (monitor)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"range": true,
"refId": "A"
}
],
"title": "Service Status",
"type": "state-timeline"
},
{
"columns": [],
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"fontSize": "100%",
"gridPos": {
"h": 11,
"w": 22,
"x": 0,
"y": 7
},
"id": 8,
"links": [],
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"styles": [
{
"alias": "Time",
"align": "auto",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "Status",
"align": "auto",
"colorMode": "cell",
"colors": [
"#F2495C",
"#F2495C",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 0,
"link": false,
"mappingType": 1,
"pattern": "Value",
"thresholds": [
"0",
"1"
],
"type": "string",
"unit": "short",
"valueMaps": [
{
"text": "Ok",
"value": "1"
},
{
"text": "Not Ok",
"value": "0"
}
]
}
],
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "rS2OIfv4z"
},
"expr": "minitor_monitor_up_count",
"format": "time_series",
"instant": true,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"refId": "A"
}
],
"title": "Monitor status",
"transform": "timeseries_to_rows",
"type": "table-old"
}
],
"refresh": "30s",
"schemaVersion": 37,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Minitor Monitor",
"uid": "qoE5Hrxiz",
"version": 1,
"weekStart": ""
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
apiVersion: 1
providers:
- name: default
folder: 'Provisioned'
type: file
disableDeletion: false
options:
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default

View File

@ -1,19 +0,0 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox

View File

@ -1,12 +0,0 @@
---
apiVersion: 1
datasources:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" }}
- name: Loki
url: "http://{{ .Address }}:{{ .Port }}"
type: loki
access: proxy
isDefault: false
version: 1
{{ end }}

View File

@ -1,16 +0,0 @@
---
apiVersion: 1
datasources:
- name: Blocky logs
url: 127.0.0.1:3306
# TODO: Looking for an ACL-friendly way to expose this since it's a variable in the blocky setup
database: blocky
type: mysql
isDefault: false
version: 1
{{ with nomadVar "nomad/jobs/grafana" -}}
user: {{ .db_user_ro }}
secureJsonData:
password: {{ .db_pass_ro }}
{{- end }}

View File

@ -1,12 +0,0 @@
---
apiVersion: 1
datasources:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "prometheus" }}
- name: Prometheus
url: "http://{{ .Address }}:{{ .Port }}"
type: prometheus
access: proxy
isDefault: true
version: 1
{{ end }}

View File

@ -1,11 +0,0 @@
---
{{ with nomadVar "nomad/jobs/grafana" -}}
notifiers:
- name: Personal email
type: email
uid: email-1
org_id: 1
is_default: false
settings:
addresses: "{{ .alert_email_addresses }}"
{{ end -}}

View File

@ -1,27 +0,0 @@
---
{{ with nomadVar "nomad/jobs/grafana" -}}
notifiers:
- name: Slack Bot
type: slack
uid: slack-1
org_id: 1
is_default: false
settings:
url: "{{ .slack_bot_url }}"
recipient: "#site-notifications"
username: Grafana Alerts
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
token: "{{ .slack_bot_token }}"
uploadImage: true
mentionChannel: channel
- name: Slack Hook
type: slack
uid: slack-2
org_id: 1
is_default: true
settings:
url: "{{ .slack_hook_url }}"
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
uploadImage: true
mentionChannel: channel
{{ end -}}

View File

@ -1,96 +0,0 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
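# lego needs 'run' for initial issuance and 'renew' afterwards; a cert restored from Nomad vars means we can renew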
action=run
if [ -f .lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego"
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}

View File

@ -1,23 +0,0 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
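# Each periodic dispatch runs as a child job with an ID like lego/periodic-<timestamp>, hence the wildcard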
job_acl {
job_id = "lego/*"
}
}

View File

@ -1,41 +0,0 @@
auth_enabled: false
server:
http_listen_port: 3100
common:
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
replication_factor: 1
path_prefix: /tmp/loki
schema_config:
configs:
- from: 2020-05-15
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
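# Index and chunks live under the task dir; the service module requests a sticky disk so they persist across restarts (best effort)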
storage_config:
boltdb_shipper:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 168h
table_manager:
retention_deletes_enabled: true
retention_period: 168h

View File

@ -1,24 +0,0 @@
module "loki" {
source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}

View File

@ -1,27 +0,0 @@
module "blocky" {
source = "./blocky"
use_wesher = var.use_wesher
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
}
resource "nomad_job" "nomad-client-stalker" {
# Stalker makes it possible to use the Nomad service registry to identify Nomad client hosts
jobspec = file("${path.module}/nomad-client-stalker.nomad")
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}

View File

@ -1,32 +0,0 @@
job "nomad-client-stalker" {
type = "system"
group "main" {
network {
mode = "host"
port "main" {}
}
service {
name = "nomad-client-stalker"
provider = "nomad"
port = "main"
}
task "main" {
driver = "docker"
config {
image = "busybox"
args = ["tail", "-f", "/dev/null"]
}
resources {
cpu = 10
memory = 15
memory_max = 30
}
}
}
}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}

View File

@ -1,50 +0,0 @@
resource "random_password" "oidc_client_id" {
length = 72
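# Limit special characters to RFC 3986 unreserved characters so the generated IDs and secrets are URL-safe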
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}

View File

@ -1,11 +0,0 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}

View File

@ -1,25 +0,0 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}

View File

@ -1,169 +0,0 @@
job "prometheus" {
datacenters = ["dc1"]
group "prometheus" {
count = 1
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9090
}
port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 9091
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "prometheus"
provider = "nomad"
port = "web"
// TODO: Remove traefik tags
tags = [
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
service {
name = "pushgateway"
provider = "nomad"
port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.43.0"
ports = ["web"]
args = [
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
]
}
template {
data = <<EOF
---
global:
scrape_interval: 30s
evaluation_interval: 3s
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 127.0.0.1:9090
- job_name: "pushgateway"
honor_labels: true
static_configs:
- targets:
- 127.0.0.1:9091
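# Scrape each Nomad client's metrics API; the nomad-client-stalker system service exists solely to make every client discoverable here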
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
nomad_sd_configs:
# TODO: Use NOMAD_SECRETS_DIR/api.sock and workload identity when
# workload ACLs can be set using Terraform
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_service]
regex: nomad-client-stalker
action: keep
- source_labels: [__meta_nomad_address]
replacement: "$1:4646"
target_label: __address__
- job_name: "nomad_services"
metrics_path: "/metrics"
nomad_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:4646"
relabel_configs:
- source_labels: [__meta_nomad_tags]
regex: .*(prometheus.scrape).*
action: keep
- source_labels: [__meta_nomad_service_address,__meta_nomad_service_port]
separator: ":"
target_label: __address__
- source_labels: [__meta_nomad_service]
target_label: nomad_service
- source_labels: [__meta_nomad_dc]
target_label: nomad_dc
- source_labels: [__meta_nomad_node_id]
target_label: nomad_node_id
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml"
}
resources {
cpu = 100
memory = 300
}
}
task "pushgateway" {
driver = "docker"
config {
image = "prom/pushgateway"
ports = ["pushgateway"]
args = [
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence",
]
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,7 +0,0 @@
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}

View File

@ -1,137 +0,0 @@
job "syslogng" {
datacenters = ["dc1"]
type = "service"
group "promtail" {
count = 1
network {
mode = "bridge"
port "main" {
to = 1514
}
port "metrics" {
to = 9080
}
}
service {
name = "syslogng-promtail"
provider = "nomad"
port = "main"
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:3.3.0"
ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"]
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
- url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
{{- end }}
scrape_configs:
# TCP syslog receiver
- job_name: syslog
syslog:
listen_address: 0.0.0.0:{{ env "NOMAD_PORT_main" }}
labels:
job: syslog
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: hostname
EOF
destination = "local/promtail.yml"
}
resources {
cpu = 50
memory = 50
}
}
}
group "syslogng" {
count = 1
network {
mode = "bridge"
port "main" {
to = 514
}
}
service {
name = "syslogng"
provider = "nomad"
port = "main"
}
task "syslogng" {
driver = "docker"
config {
image = "balabit/syslog-ng:3.37.1"
ports = ["main"]
args = ["--no-caps"]
mount {
type = "bind"
target = "/etc/syslog-ng/syslog-ng.conf"
source = "local/syslog-ng.conf"
}
}
template {
data = <<EOF
@version: 3.37
@include "scl.conf"
source s_network {
default-network-drivers(
);
};
source s_internal {
internal();
};
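# Relay everything to promtail over the syslog protocol; promtail then pushes to Loki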
destination d_loki {
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "syslogng-promtail" -}}
syslog("{{ .Address }}" transport("tcp") port({{ .Port }}));
{{- end }}
};
log { source(s_internal); destination(d_loki); };
log { source(s_network); destination(d_loki); };
EOF
destination = "local/syslog-ng.conf"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.1.0"
hashes = [
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9",
]
}

View File

@ -1,333 +0,0 @@
job "traefik" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
attribute = "${node.class}"
value = "ingress"
}
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
canary = 1
auto_promote = false
auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
}
group "traefik" {
count = 2
network {
port "web" {
static = 80
}
port "websecure" {
static = 443
}
port "syslog" {
static = 514
}
port "gitssh" {
static = 2222
}
port "metrics" {}
dns {
servers = [
"192.168.2.101",
"192.168.2.102",
"192.168.2.30",
]
}
}
ephemeral_disk {
migrate = true
sticky = true
}
task "traefik" {
driver = "docker"
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
}
config {
image = "traefik:3.0"
ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host"
mount {
type = "bind"
target = "/etc/traefik"
source = "local/config"
}
mount {
type = "bind"
target = "/etc/traefik/usersfile"
source = "secrets/usersfile"
}
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
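# The identity block below injects NOMAD_TOKEN via workload identity so the Nomad provider can authenticate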
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
}
template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<"
right_delimiter = ">>"
data = <<EOH
[log]
level = "DEBUG"
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web.http]
[entryPoints.web.http.redirections]
[entryPoints.web.http.redirections.entrypoint]
to = "websecure"
scheme = "https"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure.http.tls]
[entryPoints.metrics]
address = ":<< env "NOMAD_PORT_metrics" >>"
[entryPoints.syslogtcp]
address = ":514"
[entryPoints.syslogudp]
address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api]
dashboard = true
[ping]
entrypoint = "web"
[metrics]
[metrics.prometheus]
entrypoint = "metrics"
# manualRouting = true
[providers.file]
directory = "/etc/traefik/conf"
watch = true
[providers.nomad]
exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint]
address = "unix:///secrets/api.sock"
EOH
destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
}
template {
data = <<EOH
[http]
[http.routers]
[http.routers.nomad]
entryPoints = ["websecure"]
service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.routers.{{ .name }}]
entryPoints = ["websecure"]
service = "{{ .name }}"
rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}"
{{ $name := .name -}}
{{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }}
{{- end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services]
[http.services.nomad]
[http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.services.{{ .name }}]
[http.services.{{ .name }}.loadBalancer]
[[http.services.{{ .name }}.loadBalancer.servers]]
url = "{{ .url }}"
{{- end }}{{ end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOH
{{ with nomadService "syslogng" -}}
[tcp.routers]
[tcp.routers.syslogtcp]
entryPoints = ["syslogtcp"]
service = "syslogngtcp"
rule = "HostSNI(`*`)"
[tcp.services]
[tcp.services.syslogngtcp]
[tcp.services.syslogngtcp.loadBalancer]
{{ range . -}}
[[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
{{ with nomadService "syslogng" -}}
[udp.routers]
[udp.routers.syslogudp]
entryPoints = ["syslogudp"]
service = "syslogngudp"
[udp.services]
[udp.services.syslogngudp]
[udp.services.syslogngudp.loadBalancer]
{{ range . -}}
[[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop"
}
template {
data = <<EOH
[http.middlewares]
{{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth]
usersFile = "/etc/traefik/usersfile"
{{- end }}
{{- end }}
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml"
change_mode = "noop"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
{{ .usersfile }}
{{- end }}
EOH
destination = "${NOMAD_SECRETS_DIR}/usersfile"
change_mode = "noop"
}
resources {
cpu = 100
memory = 150
}
}
}
}

View File

@ -1,90 +0,0 @@
resource "nomad_job" "traefik" {
jobspec = file("${path.module}/traefik.nomad")
}
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
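# Each traefik_external/* variable defines a reverse-proxy entry for a service running outside Nomad;
# the traefik job's template renders these into routers and services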
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}
resource "nomad_variable" "traefik_external_jellyfin" {
path = "traefik_external/jellyfin"
items = {
name = "jellyfin"
subdomain = "jellyfin",
url = "http://agnosticfront.thefij:8096"
}
}

View File

@ -1,11 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

View File

@ -1,253 +0,0 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
priority = 80
update {
auto_revert = true
}
group "lldap" {
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "tls" {}
}
service {
name = "lldap"
provider = "nomad"
port = "ldap"
}
service {
name = "lldap-tls"
provider = "nomad"
port = "tls"
}
service {
name = "ldap-admin"
provider = "nomad"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
config {
image = "ghcr.io/lldap/lldap:v0.5"
ports = ["ldap", "web"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"]
}
env = {
"LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
}
template {
data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "secrets/ldap" -}}
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options]
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true
{{ end -}}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}"
port = {{ .port }}
tls_required = {{ .tls.Value | toLower }}
user = "{{ .user }}"
{{ end -}}
EOH
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart"
}
resources {
cpu = 10
memory = 200
memory_max = 200
}
}
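# Waits for MySQL to accept connections, then idempotently creates the lldap database and user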
task "bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
template {
data = <<EOF
[client]
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/lldap" -}}
{{ $db_name := .db_name }}
CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`
CHARACTER SET = 'utf8mb4'
COLLATE = 'utf8mb4_unicode_ci';
DROP USER IF EXISTS '{{ .db_user }}'@'%';
CREATE USER '{{ .db_user }}'@'%'
IDENTIFIED BY '{{ .db_pass }}';
GRANT ALL ON `{{ .db_name }}`.*
TO '{{ .db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
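# PSK-TLS sidecar: terminates TLS for inbound LDAP and opens a local client tunnel to the mysql-tls service on 127.0.0.1:3306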
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[ldap_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
}
}

View File

@ -1,123 +0,0 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are services dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}


@ -1,123 +0,0 @@
job "mysql-server" {
datacenters = ["dc1"]
type = "service"
priority = 80
update {
auto_revert = true
}
group "mysql-server" {
count = 1
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mode = "bridge"
port "db" {
static = 3306
}
port "tls" {}
}
volume "mysql-data" {
type = "host"
read_only = false
source = "mysql-data"
}
service {
name = "mysql-server"
provider = "nomad"
port = "db"
}
service {
name = "mysql-tls"
provider = "nomad"
port = "tls"
}
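# mysql-tls is the stunnel-wrapped endpoint; client jobs (e.g. the lldap
# stunnel sidecar above) resolve it via nomadService and connect with a
# matching PSK instead of hitting the plaintext "db" port directly.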
task "mysql-server" {
driver = "docker"
config {
image = "mariadb:10"
ports = ["db"]
args = ["--innodb-buffer-pool-size=1G"]
}
volume_mount {
volume = "mysql-data"
destination = "/var/lib/mysql"
read_only = false
}
env = {
# Allow connections from any host
"MYSQL_ROOT_HOST" = "%"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/mysql-server" }}
MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
{{ end }}
EOH
destination = "${NOMAD_SECRETS_DIR}/db.env"
env = true
}
resources {
cpu = 300
memory = 1600
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:3306
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/mysql/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}
}


@ -1,41 +0,0 @@
resource "nomad_job" "mysql-server" {
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
}


@ -1,124 +0,0 @@
job "postgres-server" {
datacenters = ["dc1"]
type = "service"
priority = 80
update {
auto_revert = true
}
group "postgres-server" {
count = 1
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mode = "bridge"
port "db" {
static = 5432
}
port "tls" {}
}
volume "postgres-data" {
type = "host"
read_only = false
source = "postgres-data"
}
service {
name = "postgres-server"
provider = "nomad"
port = "db"
}
service {
name = "postgres-tls"
provider = "nomad"
port = "tls"
}
task "postgres-server" {
driver = "docker"
config {
image = "postgres:14"
ports = ["db"]
}
volume_mount {
volume = "postgres-data"
destination = "/var/lib/postgresql/data"
read_only = false
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/postgres-server" }}
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_pass }}
{{ end }}
EOH
destination = "secrets/db.env"
env = true
}
resources {
cpu = 500
memory = 800
memory_max = 1500
}
}
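# Same sidecar pattern as mysql-server: stunnel fronts the plaintext
# Postgres port with TLS-PSK, with allowed identities read from
# secrets/postgres/allowed_psks.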
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[postgres_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:5432
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/postgres/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}
}

Some files were not shown because too many files have changed in this diff.