Compare commits


78 Commits

Author SHA1 Message Date
af05a96501 Add playbook for recovering nomad cluster with peers.json (format sketched just below the commit list) 2022-11-01 15:03:08 -07:00
754c4393f3 Remove query for nomad-clients on backup task
Not used anymore
2022-10-31 15:27:29 -07:00
04adb5db04 Go back to hard coded node names for for_each
For some reason this worked until it didn't
2022-10-31 15:24:14 -07:00
fafe8f0103 Rename backup service fixing mysql access 2022-10-31 15:23:42 -07:00
ca299db949 Fix sonarr dl folder 2022-10-31 11:32:09 -07:00
e7edef528b Refactor external service definitions 2022-10-28 12:42:28 -07:00
2a733aff96 Increase sonarr memory
Wow, this uses a lot of mem
2022-10-28 12:34:44 -07:00
7493fb27ec Big refactor to split core and services for better ordering 2022-10-27 14:28:34 -07:00
c230e05aa8 Better first run bootstrap 2022-09-27 21:59:37 -07:00
7647832434 Fix typo 2022-09-27 21:29:00 -07:00
0a6a9be872 New playbook to reset server data 2022-09-27 21:28:37 -07:00
3e89a101bf Improve first run cluster setup 2022-09-27 21:28:02 -07:00
bfa8633fcf Remove some hosts 2022-09-27 21:27:34 -07:00
7d25cd3098 Add local loopback as dns in resolv.conf 2022-09-26 16:52:59 -07:00
165a8b3a40 Increase priority of stateful jobs 2022-09-26 16:40:25 -07:00
e3ad6c4ff0 Add playbook to recover consul using peers 2022-09-16 16:46:37 -07:00
1654fd7aef Add another pi 2022-09-16 16:46:10 -07:00
ff49e039da Enable preemption on nomad scheduler 2022-09-16 16:45:26 -07:00
845ea4d391 Make Nomad media-read volume point to rw nfs 2022-09-16 16:44:40 -07:00
b9ea8eb0a2 Remove bootstrapping values from setup playbook
This will be done in another playbook
2022-09-16 16:43:45 -07:00
fbacdb2699 Bootstrap with 3 servers 2022-09-16 16:42:54 -07:00
04c76bd5f8 Get ddclient working again
It was failing due to OOM and because it was using Cloudflare API tokens, which aren't yet supported
2022-09-07 12:20:58 -07:00
50bdb61575 Wait until Nomad is running before bootstrapping ACLs 2022-09-07 11:11:10 -07:00
bd1280b970 Clean up comments in setup cluster playbook 2022-09-07 11:09:33 -07:00
ecbd9626e8 Explicitly set envoy version
The default v1.23.0 does not work on arm64
2022-09-07 11:06:26 -07:00
a9bbc3705a Update vault and nomad versions 2022-09-07 11:05:54 -07:00
c8d77e553d Add playbook to unseal Vault 2022-09-07 11:05:27 -07:00
24c04f4217 Make vault load balancer sticky
Assets like CSS and JS were not proxying correctly. I think it may be
because they were proxying to a different instance and the paths
are dynamic. This should route subsequent requests in a session to
a single backend.
2022-09-06 17:17:14 -07:00
8aed3a877e Add homeassistant external 2022-09-06 17:15:43 -07:00
47f8b18b46 add nomad login 2022-09-06 14:47:06 -07:00
cfc0a45440 Update security todos and reference node IP for consul queries 2022-09-06 14:46:49 -07:00
a57b1ddee5 Move redis data to ephemeral disk 2022-09-06 11:31:15 -07:00
0e5181fcf0 Move prometheus tsdb data to ephemeral disk 2022-09-06 11:15:14 -07:00
fa5f9e28e6 Move acme certs to /local so they will persist between allocs 2022-09-06 09:45:04 -07:00
722b63260c Add splay to blocky template render
Avoid all instances going down at once when the template changes
2022-09-05 12:57:13 -07:00
67df912755 Fix syslog proxy
Apparently traefik only supports http proxy over connect.

https://github.com/traefik/traefik/issues/7803
2022-09-04 20:21:02 -07:00
d62c96fe34 Use nomad as sole metrics exporter
Drops cadvisor and node_exporter since Nomad seems to export what I need.
2022-09-04 14:32:24 -07:00
d5cbe7174e Remove default volume read_only
It was always being set to true
2022-09-04 14:27:28 -07:00
c2c3d1abc7 Update nfs volumes to try and fix permissions 2022-09-04 14:27:27 -07:00
8ce4e3ff14 Try to use default network source for proxying syslogng 2022-09-04 14:27:27 -07:00
a36f411c1b Add Traefik proxy for Syslogng 2022-09-04 14:27:07 -07:00
444782a0a6 Use default arch maps where possible 2022-08-30 16:15:12 -07:00
92a60cbe3b Update services template to support env and host volumes
Also adds sonarr as an example
2022-08-30 15:16:08 -07:00
9c07141dd1 Use nomad token to look up policies 2022-08-30 15:15:29 -07:00
1c57d9f7f6 Have nomad talk to vault over loopback 2022-08-30 15:15:10 -07:00
0ef488b06a Add new nfs volumes 2022-08-30 15:14:55 -07:00
6fe1d472d0 Multiarch install tweaks for arm64 2022-08-30 15:14:39 -07:00
c073f78ed2 Fix unsealing of single vault instance
Checking status of only one node meant that if that node was sealed
we would not try to unseal other nodes
2022-08-30 15:14:00 -07:00
5214d8275a Enable consul autopilot 2022-08-30 15:12:52 -07:00
89598ffb7c Update nomad, consul, vault versions 2022-08-30 15:12:35 -07:00
89e14dbf56 Use newer cadvisor 2022-08-30 15:11:52 -07:00
2a54b5454d Use updated ansible-nomad role
Has better support for multi-arch installs and fixes cni
2022-08-30 15:10:16 -07:00
520986d30c Add pi4 host 2022-08-30 15:09:48 -07:00
9aad3d1594 Rename nomad anon policy file 2022-08-23 10:31:29 -07:00
39107538e9 WIP: Allow specifying https endpoints and fetching nomad token 2022-08-23 09:57:57 -07:00
1c38aa212e Add mysql database storage to Grafana 2022-07-29 13:02:22 -07:00
0d61ebc877 Add Nomad dashboard to grafana 2022-07-29 13:01:59 -07:00
846ea18a16 Don't deploy Nextcloud 2022-07-29 13:01:40 -07:00
6d31c4e6d6 Stop duplicate nomad scraping
Already getting it from Client service
2022-07-29 13:01:22 -07:00
9d57175584 Increase promtail memory 2022-07-28 16:37:19 -07:00
3c0c74797d Make traefik a service rather than a system job
Sets it up to support auto_revert and auto_promote
2022-07-28 15:11:59 -07:00
4b6c388ed9 Traefik wildcard certs 2022-07-28 15:11:24 -07:00
6ccc5a6bcf Remove variable for consul_address for traefik
Now getting it from the Nomad environment
2022-07-28 15:10:39 -07:00
48d5704b72 Make lldap backup daily 2022-07-28 15:05:00 -07:00
62f59b3929 conditional dns lookups for router assigned domains 2022-07-27 22:04:46 -07:00
c074df4bc7 Working backup and restore 2022-07-27 22:04:22 -07:00
d175166045 Make traefik disk ephemeral and sticky 2022-07-27 17:30:35 -07:00
c8493b1fc5 Bump Traefik mem limit
We don't like this crashing
2022-07-27 17:26:13 -07:00
a3f59145bd Skip dump of lldap db 2022-07-27 17:25:41 -07:00
9a315eb2f7 Add lldap backup and templatize backup job
Now oneoff and system jobs are all using the same template
2022-07-27 17:02:29 -07:00
6e074c55aa Increase prometheus memory limit 2022-07-27 16:11:56 -07:00
ecaee6f8be Add lldap 2022-07-27 15:57:28 -07:00
4213b322c1 Remove set hostname because that's now done in bootstrap 2022-07-27 15:57:12 -07:00
1dd131ba9a Extend ttl for nomad tokens 2022-07-27 15:56:40 -07:00
bc040b4668 Add ddclient 2022-07-27 14:45:08 -07:00
9664802fb6 Clean up services template whitespace 2022-07-27 14:41:42 -07:00
547cd96e4c Add vault stanza to levant services 2022-07-27 14:41:13 -07:00
e39fbc41a7 Add further todos for Nomad Vault 2022-07-27 13:40:21 -07:00
89 changed files with 3037 additions and 5078 deletions
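Two of the playbook commits above (af05a96501 for Nomad and e3ad6c4ff0 for Consul) recover a broken cluster with a peers.json file. As a rough, illustrative sketch (not taken from this repository), a Raft protocol v3 peers.json lists every surviving server and is dropped into each stopped server's raft directory, typically <data_dir>/server/raft/ for Nomad and <data_dir>/raft/ for Consul, before the agents are restarted. The IDs and addresses below are placeholders; Nomad servers use RPC port 4647, while Consul would use its server port 8300.

[
  {
    "id": "11111111-2222-3333-4444-555555555555",
    "address": "192.168.2.10:4647",
    "non_voter": false
  },
  {
    "id": "11111111-2222-3333-4444-666666666666",
    "address": "192.168.2.11:4647",
    "non_voter": false
  },
  {
    "id": "11111111-2222-3333-4444-777777777777",
    "address": "192.168.2.12:4647",
    "non_voter": false
  }
]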

View File

@ -115,10 +115,10 @@
} }
], ],
"results": { "results": {
"nomad/metrics/grafana/grafana.ini": [ "nomad/core/metrics/grafana/grafana.ini": [
{ {
"type": "Basic Auth Credentials", "type": "Basic Auth Credentials",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4", "hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false, "is_verified": false,
"line_number": 78, "line_number": 78,
@ -126,7 +126,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd", "hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false, "is_verified": false,
"line_number": 109, "line_number": 109,
@ -134,7 +134,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997", "hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false, "is_verified": false,
"line_number": 151, "line_number": 151,
@ -142,7 +142,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f", "hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false, "is_verified": false,
"line_number": 154, "line_number": 154,
@ -150,7 +150,7 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3", "hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false, "is_verified": false,
"line_number": 239, "line_number": 239,
@ -158,28 +158,28 @@
}, },
{ {
"type": "Secret Keyword", "type": "Secret Keyword",
"filename": "nomad/metrics/grafana/grafana.ini", "filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50", "hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false, "is_verified": false,
"line_number": 252, "line_number": 252,
"is_secret": false "is_secret": false
} }
], ],
"nomad/syslogng.nomad": [ "nomad/core/syslogng.nomad": [
{ {
"type": "Base64 High Entropy String", "type": "Base64 High Entropy String",
"filename": "nomad/syslogng.nomad", "filename": "nomad/core/syslogng.nomad",
"hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6", "hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6",
"is_verified": false, "is_verified": false,
"line_number": 163, "line_number": 159,
"is_secret": false "is_secret": false
}, },
{ {
"type": "Base64 High Entropy String", "type": "Base64 High Entropy String",
"filename": "nomad/syslogng.nomad", "filename": "nomad/core/syslogng.nomad",
"hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4", "hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4",
"is_verified": false, "is_verified": false,
"line_number": 163, "line_number": 159,
"is_secret": false "is_secret": false
} }
], ],
@ -210,5 +210,5 @@
} }
] ]
}, },
"generated_at": "2022-07-27T03:09:38Z" "generated_at": "2022-10-27T21:28:03Z"
} }

View File

@ -2,21 +2,21 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" { provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1" version = "2.14.0"
hashes = [ hashes = [
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=", "h1:lJWOdlqevg6FQLFlfM3tGOsy9yPrjm9/vqkfzVrqT/A=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed", "h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433", "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73", "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878", "zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed", "zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30", "zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2", "zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85", "zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061", "zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24", "zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5", "zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d", "zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153",
] ]
} }
@ -40,40 +40,39 @@ provider "registry.terraform.io/hashicorp/external" {
} }
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17" version = "1.4.16"
hashes = [ hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=", "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a", "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f", "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1", "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792", "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916", "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0", "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85", "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328", "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65", "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d", "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6", "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
] ]
} }
provider "registry.terraform.io/hashicorp/vault" { provider "registry.terraform.io/hashicorp/vault" {
version = "3.8.0" version = "3.3.1"
constraints = "3.8.0"
hashes = [ hashes = [
"h1:F+1vJ14D9nNx3sNrCbKxvpJZ+QnVmD1p/ITbYPlkRg4=", "h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
"zh:2c807352fd061f31d2972f131b74ab2e2c47031760a9f18b6f4b4a699d384969", "h1:i7EC2IF0KParI+JPA5ZtXJrAn3bAntW5gEMLvOXwpW4=",
"zh:3c5d6334c367c41d570f0eb226be0dfbdb31034669b8914b509f145a279c2bfa", "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
"zh:4ce3887e53cc9536bfd500fac09caaab93084ed145532a521826a5093e7f8dd7", "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
"zh:6990eac4216fb8d7fcbe0a483cc1c6a077d0e970db84fb1c0b9032158b555c0e", "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282",
"zh:939576f814ee4406131bdd3564cee041b05176d2e0a0b55e8081019348125e76", "zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce",
"zh:a0545395bd6039f7c9998113ada4334717eb1c74fee4ece7da1d4f3e6d5ef7ba", "zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb",
"zh:a086e5e4fdadcb0492f48074047954cc6c437b9ee57d9ec7ba850fb7cb5455a8", "zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929",
"zh:c997156a7c23fa06304d7e22cfd64407e9ed69237c5780d20026521ce2be478d", "zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891",
"zh:d47ad773cf50d703450cf301872cbc33938712a5ae491dfebf77611e1bcb0237", "zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861",
"zh:d95de02ccc23416e2eefb689c94046a5dcb4c65ab96cebc61838c5b1ef70e1d3", "zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c",
"zh:f166c7ed64c12978c4296d477ca508df82791648e6e9ff523268c1d361493851", "zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941",
] ]
} }

View File

@ -75,6 +75,11 @@ bootstrap-values: venv/bin/ansible galaxy
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \ $(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml -i ansible_hosts.yml -M ./roles ./bootstrap-values.yml
.PHONY: unseal-vault
unseal-vault: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
-e "@vault-keys.json" -i ansible_hosts.yml -M ./roles ./unseal-vault.yml
.PHONY: init .PHONY: init
init: init:
@terraform init @terraform init
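The new unseal-vault target runs an unseal-vault.yml playbook that is not included in this diff. A minimal sketch of such a playbook is below; it assumes vault-keys.json supplies an unseal_keys_hex list and that a vault_instances inventory group exists, both of which are assumptions rather than facts from this repository.

---
- name: Unseal Vault
  hosts: vault_instances  # assumed group name; adjust to the real inventory
  tasks:
    - name: Submit unseal keys
      # vault-keys.json is assumed to hold unseal_keys_hex from `vault operator init`
      command: vault operator unseal {{ item }}
      environment:
        VAULT_ADDR: "http://{{ inventory_hostname }}:8200"
      loop: "{{ unseal_keys_hex }}"
      no_log: true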

View File

@ -40,21 +40,20 @@ provider "registry.terraform.io/hashicorp/nomad" {
} }
provider "registry.terraform.io/hashicorp/vault" { provider "registry.terraform.io/hashicorp/vault" {
version = "3.8.0" version = "3.7.0"
constraints = "3.8.0"
hashes = [ hashes = [
"h1:F+1vJ14D9nNx3sNrCbKxvpJZ+QnVmD1p/ITbYPlkRg4=", "h1:idawLPCbZgHIb+NRLJs4YdIcQgACqYiT5VwQfChkn+w=",
"zh:2c807352fd061f31d2972f131b74ab2e2c47031760a9f18b6f4b4a699d384969", "zh:256b82692c560c76ad51414a2c003cadfa10338a9df333dbe22dd14a9ed16f95",
"zh:3c5d6334c367c41d570f0eb226be0dfbdb31034669b8914b509f145a279c2bfa", "zh:329ed8135a98bd6a000d014e40bc5981c6868cf50eedf454f1a1f72ac463bdf0",
"zh:4ce3887e53cc9536bfd500fac09caaab93084ed145532a521826a5093e7f8dd7", "zh:3b32c18b492a6ac8e1ccac40d28cd42a88892ef8f3515291676136e3faac351c",
"zh:6990eac4216fb8d7fcbe0a483cc1c6a077d0e970db84fb1c0b9032158b555c0e", "zh:4c5ea8e80543b36b1999257a41c8b9cde852542251de82a94cff2f9d280ac2ec",
"zh:5d968ed305cde7aa3567a943cb2f5f8def54b40a2292b66027b1405a1cf28585",
"zh:60226d1a0a496a9a6c1d646800dd7e1bd1c4f5527e7307ff0bca9f4d0b5395e2",
"zh:71b11def501c994ee5305f24bd47ebfcca2314c5acca3efcdd209373d0068ac0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:939576f814ee4406131bdd3564cee041b05176d2e0a0b55e8081019348125e76", "zh:89be6b5db3be473bfd14422a9abf83245c4b22ce47a8fe463bbebf8e20958ab1",
"zh:a0545395bd6039f7c9998113ada4334717eb1c74fee4ece7da1d4f3e6d5ef7ba", "zh:8f91051d43ae309bb8f3f6a9659f0fd26b1b239faf671c139b4e9ad0d208db05",
"zh:a086e5e4fdadcb0492f48074047954cc6c437b9ee57d9ec7ba850fb7cb5455a8", "zh:b5114983273d3170878f657b92738b2c40953aedeef2e1840588ecaf1bc0827e",
"zh:c997156a7c23fa06304d7e22cfd64407e9ed69237c5780d20026521ce2be478d", "zh:fd56db01c5444dc8ca2e0ad2f13fc4c17735d0fdeb5960e23176fb3f5a5114d3",
"zh:d47ad773cf50d703450cf301872cbc33938712a5ae491dfebf77611e1bcb0237",
"zh:d95de02ccc23416e2eefb689c94046a5dcb4c65ab96cebc61838c5b1ef70e1d3",
"zh:f166c7ed64c12978c4296d477ca508df82791648e6e9ff523268c1d361493851",
] ]
} }

View File

@ -1,15 +0,0 @@
resource "consul_acl_policy" "server_policy" {
name = "consul-servers"
rules = <<EOH
node_prefix "server-" {
policy = "write"
}
node_prefix "" {
policy = "read"
}
service_prefix "" {
policy = "read"
}
EOH
}

View File

@ -1,20 +0,0 @@
resource "vault_consul_secret_backend" "config" {
path = "consul"
description = "Manages the Consul backend"
address = "http://127.0.0.1:8300"
# Using root token here, do consul tokens expire?
token = var.consul_token
}
resource "vault_consul_secret_backend_role" "consul_servers" {
name = "consul-servers"
backend = vault_consul_secret_backend.config.path
consul_policies = [
"consul-servers"
]
max_ttl = 240
ttl = 120
}

View File

@ -1,7 +1,7 @@
resource "nomad_acl_policy" "anon_policy" { resource "nomad_acl_policy" "anon_policy" {
name = "anonymous" name = "anonymous"
description = "Anon RO" description = "Anon RO"
rules_hcl = file("${path.module}/nomad-anon-bootstrap.hcl") rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
} }
resource "nomad_acl_policy" "admin" { resource "nomad_acl_policy" "admin" {
@ -10,9 +10,9 @@ resource "nomad_acl_policy" "admin" {
rules_hcl = file("${path.module}/nomad-admin-policy.hcl") rules_hcl = file("${path.module}/nomad-admin-policy.hcl")
} }
# TODO: Limit this scope # TODO: (security) Limit this scope
resource "nomad_acl_policy" "deploy" { resource "nomad_acl_policy" "deploy" {
name = "deploy" name = "deploy"
description = "Admin RW" description = "Write for job deployments"
rules_hcl = file("${path.module}/nomad-deploy-policy.hcl") rules_hcl = file("${path.module}/nomad-deploy-policy.hcl")
} }

View File

@ -11,8 +11,9 @@ resource "vault_nomad_secret_backend" "config" {
default_lease_ttl_seconds = "3600" default_lease_ttl_seconds = "3600"
max_lease_ttl_seconds = "7200" max_lease_ttl_seconds = "7200"
max_ttl = "240"
ttl = "120" ttl = "3600"
max_ttl = "7200"
} }
# Vault roles generating Nomad tokens # Vault roles generating Nomad tokens

View File

@ -1,12 +1,3 @@
terraform {
required_providers {
vault = {
source = "hashicorp/vault"
version = "3.8.0"
}
}
}
# Configure Consul provider # Configure Consul provider
provider "consul" { provider "consul" {
address = var.consul_address address = var.consul_address

View File

@ -3,19 +3,15 @@ variable "consul_address" {
default = "http://n1.thefij:8500" default = "http://n1.thefij:8500"
} }
variable "consul_token" {
type = string
description = "Token for setting up consul"
sensitive = true
}
variable "nomad_secret_id" { variable "nomad_secret_id" {
type = string type = string
description = "Secret ID for ACL bootstrapped Nomad" description = "Secret ID for ACL bootstrapped Nomad"
sensitive = true sensitive = true
default = ""
} }
variable "vault_token" { variable "vault_token" {
type = string type = string
sensitive = true sensitive = true
default = ""
} }

View File

@ -13,7 +13,14 @@ all:
group: "bin" group: "bin"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: lldap-data
path: /srv/volumes/lldap
owner: "root"
group: "bin"
mode: "0755"
read_only: false
n2.thefij: n2.thefij:
nomad_node_class: ingress
nomad_node_role: both nomad_node_role: both
nomad_unique_host_volumes: nomad_unique_host_volumes:
- name: nextcloud-data - name: nextcloud-data
@ -28,15 +35,25 @@ all:
group: "bin" group: "bin"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: authentik-data - name: sonarr-data
path: /srv/volumes/gitea path: /srv/volumes/sonarr
owner: "root" owner: "root"
group: "bin" group: "bin"
mode: "0755" mode: "0755"
read_only: false read_only: false
n3.thefij: - name: nzbget-data
nomad_node_class: ingress path: /srv/volumes/nzbget
nomad_node_role: both owner: "root"
group: "bin"
mode: "0755"
read_only: false
# n3.thefij:
# nomad_node_class: ingress
# nomad_node_role: both
# pi3:
# nomad_node_role: client
# pi4:
# nomad_node_role: client
consul_instances: consul_instances:
children: children:

View File

@ -1,155 +0,0 @@
variable "nextcloud_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
variable "consul_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup" {
datacenters = ["dc1"]
type = "system"
constraint {
attribute = "${node.unique.name}"
# Only node with a backup job so far
# Remove when backing up all nodes
value = "n2"
}
group "backup" {
network {
mode = "bridge"
port "metrics" {
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = true
source = "all-volumes"
}
service {
port = "metrics"
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = true
}
config {
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
}
template {
# Probably want to use database credentials that have access to dump all tables
data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
{{ with secret "kv/data/backups" }}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
}
template {
data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
destination = "local/consul.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }}
# Consul backup below?
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${var.consul_backup}
{{ end -}}
{{ range service "nextcloud" }}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") }}
${var.nextcloud_backup}
{{ end }}{{ end }}
EOF
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 256
}
}
}
}

View File

@ -1,27 +0,0 @@
locals {
nextcloud_backup = file("${path.module}/jobs/nextcloud.hcl")
}
resource "nomad_job" "backups" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
}
}
jobspec = file("${path.module}/backup.nomad")
}
resource "nomad_job" "backups-oneoff" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
}
}
jobspec = file("${path.module}/oneoff.nomad")
}

View File

@ -1,20 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -40,9 +40,23 @@
name: hvac name: hvac
extra_args: --index-url https://pypi.org/simple extra_args: --index-url https://pypi.org/simple
# This fails on first run because `root_token` isn't found - name: Check mount
# Fails after taht too because the kv/ space has not been created yet either! Oh noes! community.hashi_vault.vault_read:
# Maybe move data bootstrapping to after the cluster is bootstrapped url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values - name: Write values
no_log: true no_log: true
community.hashi_vault.vault_write: community.hashi_vault.vault_write:
@ -53,6 +67,8 @@
data: data:
"{{ item.value }}" "{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}" loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 10
- name: Write userpass - name: Write userpass
no_log: true no_log: true

nomad/clear-data.yml (new file, 64 lines)
View File

@ -0,0 +1,64 @@
---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: consul
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data
hosts: nomad_instances
tasks:
- name: Stop nomad
systemd:
name: nomad
state: stopped
become: true
- name: Kill nomad
shell:
cmd: systemctl kill nomad
become: true
- name: Stop all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
become: true
- name: Remove all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
become: true
- name: Unmount secrets
shell:
cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
become: true
- name: Remove data dir
file:
path: /var/nomad
state: absent
become: true
- name: Remove data dir
file:
path: /opt/nomad/data
state: absent
become: true

View File

@ -2,116 +2,11 @@ module "databases" {
source = "./databases" source = "./databases"
} }
module "blocky" { module "core" {
source = "./blocky" source = "./core"
base_hostname = var.base_hostname base_hostname = var.base_hostname
# Metrics and Blocky depend on databases
depends_on = [module.databases] depends_on = [module.databases]
} }
module "traefik" {
source = "./traefik"
consul_address = var.consul_address
base_hostname = var.base_hostname
}
module "metrics" {
source = "./metrics"
consul_address = var.consul_address
}
module "loki" {
source = "./levant"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("./loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
}
resource "consul_config_entry" "loki_intent" {
name = "loki"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "promtail"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "syslogng-promtail"
Precedence = 9
Type = "consul"
},
]
})
}
resource "nomad_job" "syslog-ng" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/syslogng.nomad")
}
resource "consul_config_entry" "syslogng_promtail_intent" {
name = "syslogng-promtail"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "syslogng"
Precedence = 9
Type = "consul"
},
]
})
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}

View File

@ -0,0 +1,59 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.16.2"
hashes = [
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
]
}
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
]
}

View File

@ -108,6 +108,7 @@ job "blocky" {
template { template {
data = var.config_data data = var.config_data
destination = "app/config.yml" destination = "app/config.yml"
splay = "1m"
} }
} }
} }
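Commit 722b63260c only adds the single splay line shown above; for context, a minimal sketch of the resulting Nomad template stanza is below (values copied from the diff, comments added). With the default change_mode of restart, Nomad waits a random interval up to the splay value before restarting each allocation, so the blocky instances do not all bounce at the same moment when the rendered config changes.

template {
  data        = var.config_data
  destination = "app/config.yml"
  # change_mode defaults to "restart"; splay adds a random 0-1m delay
  # before that restart so instances are not all restarted together
  splay       = "1m"
}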

View File

@ -17,6 +17,15 @@ upstream:
- https://dns10.quad9.net/dns-query - https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net - tcp-tls:dns10.quad9.net
conditional:
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
blocking: blocking:
blackLists: blackLists:
ads: ads:

nomad/core/ddclient.nomad (new file, 55 lines)
View File

@ -0,0 +1,55 @@
job "ddclient" {
datacenters = ["dc1"]
type = "service"
group "ddclient" {
task "ddclient" {
driver = "docker"
config {
image = "linuxserver/ddclient:3.9.1"
mount {
type = "bind"
source = "secrets/ddclient.conf"
target = "/config/ddclient.conf"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOH
daemon=900
ssl=yes
use=web
protocol=cloudflare,
zone={{ key "ddclient/zone" }},
ttl=1,
{{ with secret "kv/data/cloudflare" -}}
login={{ .Data.data.api_user }},
password={{ .Data.data.api_key }}
# login=token,
# password={{ .Data.data.api_token_dns_edit_all }}
{{ end -}}
{{ key "ddclient/domain" }}
EOH
destination = "secrets/ddclient.conf"
change_mode = "restart"
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
}
}
}

nomad/core/lldap.nomad (new file, 140 lines)
View File

@ -0,0 +1,140 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
group "lldap" {
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 17170
}
port "ldap" {
host_network = "loopback"
to = 3890
}
}
volume "lldap-data" {
type = "host"
read_only = false
source = "lldap-data"
}
service {
name = "lldap"
port = "ldap"
connect {
sidecar_service {
proxy {
local_service_port = 3890
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
}
service {
name = "ldap-admin"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 17170
}
}
sidecar_task {
resources {
cpu = 20
memory = 20
}
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
volume_mount {
volume = "lldap-data"
destination = "/data"
read_only = false
}
config {
image = "nitnelave/lldap"
ports = ["ldap", "web"]
args = ["run", "--config-file", "/lldap_config.toml"]
mount {
type = "bind"
source = "secrets/lldap_config.toml"
target = "/lldap_config.toml"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOH
database_url = "sqlite:///data/users.db?mode=rwc"
key_file = "/data/private_key"
ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
{{ with secret "kv/data/lldap" -}}
jwt_secret = "{{ .Data.data.jwt_secret }}"
ldap_user_dn = "{{ .Data.data.admin_user }}"
ldap_user_email = "{{ .Data.data.admin_email }}"
ldap_user_pass = "{{ .Data.data.admin_password }}"
{{ end -}}
{{ with secret "kv/data/smtp" -}}
[smtp_options]
enable_password_reset = true
server = "{{ .Data.data.server }}"
port = {{ .Data.data.port }}
tls_required = {{ .Data.data.tls }}
user = "{{ .Data.data.user }}"
password = "{{ .Data.data.password }}"
{{ with secret "kv/data/lldap" -}}
from = "{{ .Data.data.smtp_from }}"
reply_to = "{{ .Data.data.smtp_reply_to }}"
{{ end -}}
{{ end -}}
EOH
destination = "secrets/lldap_config.toml"
change_mode = "restart"
}
resources {
cpu = 10
memory = 20
memory_max = 100
}
}
}
}

nomad/core/main.tf (new file, 134 lines)
View File

@ -0,0 +1,134 @@
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
base_hostname = var.base_hostname
}
module "nomad_login" {
source = "../levant"
template_path = "service.nomad"
variables = {
name = "nomad-login"
image = "iamthefij/nomad-vault-login"
service_port = 5000
ingress = true
ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
env = jsonencode({
VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
})
}
}
module "metrics" {
source = "./metrics"
# Not in this module
# depends_on = [module.databases]
}
module "loki" {
source = "../levant"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("${path.module}/loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
}
resource "consul_config_entry" "loki_intent" {
name = "loki"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "promtail"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "syslogng-promtail"
Precedence = 9
Type = "consul"
},
]
})
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}
resource "consul_config_entry" "syslogng_promtail_intent" {
name = "syslogng-promtail"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "syslogng"
Precedence = 9
Type = "consul"
},
]
})
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}

View File

@ -0,0 +1,150 @@
job "metrics" {
datacenters = ["dc1"]
type = "system"
group "promtail" {
network {
mode = "bridge"
port "promtail" {
to = 9080
}
}
service {
name = "promtail"
port = "promtail"
meta {
metrics_addr = "${NOMAD_ADDR_promtail}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 9080
upstreams {
destination_name = "loki"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "promtail"
interval = "10s"
timeout = "10s"
}
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:2.2.1"
args = ["-config.file=/etc/promtail/promtail.yml"]
ports = ["promtail"]
# Mount config
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
# Bind mount host machine-id and log directories
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
mount {
type = "bind"
source = "/var/log/journal/"
target = "/var/log/journal/"
readonly = true
}
mount {
type = "bind"
source = "/run/log/journal/"
target = "/run/log/journal/"
readonly = true
}
# mount {
# type = "bind"
# source = "/var/log/audit"
# target = "/var/log/audit"
# readonly = true
# }
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
# loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
scrape_configs:
- job_name: journal
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: systemd-journal
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: unit
- source_labels: ['__journal__hostname']
target_label: hostname
- source_labels: ['__journal__transport']
target_label: journal_transport
# Docker log labels
- source_labels: ['__journal_syslog_identifier']
target_label: syslog_identifier
- source_labels: ['__journal_image_name']
target_label: docker_image_name
- source_labels: ['__journal_container_name']
target_label: docker_container_name
- source_labels: ['__journal_container_id']
target_label: docker_container_id
- source_labels: ['__journal_com_docker_compose_project']
target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service
EOF
destination = "local/promtail.yml"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@ -38,6 +38,11 @@ job "grafana" {
destination_name = "loki" destination_name = "loki"
local_bind_port = 3100 local_bind_port = 3100
} }
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
} }
} }
@ -62,6 +67,70 @@ job "grafana" {
] ]
} }
task "grafana-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mysql:8"
args = [
"/bin/bash",
"-c",
"/usr/bin/mysql --defaults-extra-file=/task/my.cnf < /task/bootstrap.sql",
]
mount {
type = "bind"
source = "local/"
target = "/task/"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
[client]
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
user=root
{{ with secret "kv/data/mysql" }}
password={{ .Data.data.root_password }}
{{ end }}
EOF
destination = "local/my.cnf"
}
template {
data = <<EOF
{{ with secret "kv/data/grafana" -}}
{{ if .Data.data.db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "local/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" { task "grafana" {
driver = "docker" driver = "docker"
@ -89,19 +158,27 @@ job "grafana" {
template { template {
data = <<EOF data = <<EOF
{{ with secret "kv/data/grafana" }} {{ with secret "kv/data/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }} GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }}
GF_SMTP_USER={{ .Data.data.smtp_user }} GF_SMTP_USER={{ .Data.data.smtp_user }}
GF_SMTP_PASSWORD={{ .Data.data.smtp_password }} GF_SMTP_PASSWORD={{ .Data.data.smtp_password }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }} GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }}
{{ end }} {{ if .Data.data.db_name -}}
{{ with secret "kv/data/slack" }} # Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST={{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
GF_DATABASE_NAME={{ .Data.data.db_name }}
GF_DATABASE_USER={{ .Data.data.db_user }}
GF_DATABASE_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ end -}}
{{ with secret "kv/data/slack" -}}
SLACK_BOT_URL={{ .Data.data.bot_url }} SLACK_BOT_URL={{ .Data.data.bot_url }}
SLACK_BOT_TOKEN={{ .Data.data.bot_token }} SLACK_BOT_TOKEN={{ .Data.data.bot_token }}
SLACK_HOOK_URL={{ .Data.data.hook_url }} SLACK_HOOK_URL={{ .Data.data.hook_url }}
{{ end }} {{ end -}}
EOF EOF
env = true env = true
destination = "secrets/conf.env" destination = "secrets/conf.env"

File diff suppressed because it is too large.

View File

@ -1,8 +1,3 @@
variable "consul_address" {
type = string
description = "address of consul server for dynamic scraping"
}
resource "nomad_job" "exporters" { resource "nomad_job" "exporters" {
hcl2 { hcl2 {
enabled = true enabled = true
@ -20,12 +15,6 @@ data "consul_nodes" "all-nodes" {
resource "nomad_job" "prometheus" { resource "nomad_job" "prometheus" {
hcl2 { hcl2 {
enabled = true enabled = true
vars = {
# TODO: May not need this because we have an env variable for that
# "consul_address" = "${var.consul_address}",
# TODO: Should this be a list?
"consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
}
} }
jobspec = file("${path.module}/prometheus.nomad") jobspec = file("${path.module}/prometheus.nomad")

View File

@ -1,9 +1,3 @@
variable "consul_address" {
type = string
description = "Full address of Consul instance to get catalog from"
default = "http://127.0.0.1:5400"
}
job "prometheus" { job "prometheus" {
datacenters = ["dc1"] datacenters = ["dc1"]
@ -65,7 +59,7 @@ job "prometheus" {
ports = ["web"] ports = ["web"]
args = [ args = [
"--config.file=/etc/prometheus/config/prometheus.yml", "--config.file=/etc/prometheus/config/prometheus.yml",
"--storage.tsdb.path=/prometheus", "--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090", "--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries", "--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles", "--web.console.templates=/usr/share/prometheus/consoles",
@ -91,27 +85,13 @@ scrape_configs:
- targets: - targets:
- 0.0.0.0:9090 - 0.0.0.0:9090
- job_name: "nomad_server"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
consul_sd_configs:
- server: "${var.consul_address}"
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
services:
- "nomad"
tags:
- "http"
- job_name: "nomad_client" - job_name: "nomad_client"
metrics_path: "/v1/metrics" metrics_path: "/v1/metrics"
params: params:
format: format:
- "prometheus" - "prometheus"
consul_sd_configs: consul_sd_configs:
- server: "${var.consul_address}" - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
services: services:
- "nomad-client" - "nomad-client"
@ -121,8 +101,7 @@ scrape_configs:
format: format:
- "prometheus" - "prometheus"
consul_sd_configs: consul_sd_configs:
- server: "${var.consul_address}" - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
services: services:
- "consul" - "consul"
relabel_configs: relabel_configs:
@ -133,8 +112,7 @@ scrape_configs:
- job_name: "exporters" - job_name: "exporters"
metrics_path: "/metrics" metrics_path: "/metrics"
consul_sd_configs: consul_sd_configs:
- server: "${var.consul_address}" - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
relabel_configs: relabel_configs:
- source_labels: [__meta_consul_service] - source_labels: [__meta_consul_service]
action: drop action: drop
@ -156,8 +134,7 @@ scrape_configs:
- job_name: "envoy" - job_name: "envoy"
metrics_path: "/metrics" metrics_path: "/metrics"
consul_sd_configs: consul_sd_configs:
- server: "${var.consul_address}" - server: "http://{{env "attr.unique.network.ip-address"}}:8500"
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
relabel_configs: relabel_configs:
- source_labels: [__meta_consul_service] - source_labels: [__meta_consul_service]
action: keep action: keep
@ -183,7 +160,7 @@ scrape_configs:
resources { resources {
cpu = 100 cpu = 100
memory = 200 memory = 300
} }
} }
} }

View File

@ -92,17 +92,10 @@ EOF
group "syslogng" { group "syslogng" {
count = 1 count = 1
constraint {
attribute = "${node.unique.name}"
# Needs to be on a predictable node for routing
# Maybe a loadbalancer could be used for routing from any node
value = "n2"
}
network { network {
mode = "bridge" mode = "bridge"
port "main" { port "main" {
static = 1514 to = 514
} }
} }
@ -113,6 +106,8 @@ EOF
connect { connect {
sidecar_service { sidecar_service {
proxy { proxy {
local_service_port = 514
upstreams { upstreams {
destination_name = "syslogng-promtail" destination_name = "syslogng-promtail"
local_bind_port = 1000 local_bind_port = 1000
@ -147,11 +142,12 @@ EOF
template { template {
data = <<EOF data = <<EOF
@version: 3.22 @version: 3.37
@include "scl.conf"
source s_external { source s_network {
syslog(ip(0.0.0.0) port(1514) transport("tcp")); default-network-drivers(
syslog(ip(0.0.0.0) port(1514) transport("udp")); );
}; };
source s_internal { source s_internal {
@ -164,7 +160,7 @@ destination d_loki {
}; };
log { source(s_internal); destination(d_loki); }; log { source(s_internal); destination(d_loki); };
log { source(s_external); destination(d_loki); }; log { source(s_network); destination(d_loki); };
EOF EOF
destination = "local/syslog-ng.conf" destination = "local/syslog-ng.conf"
} }

View File

@ -0,0 +1,21 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}

View File

@ -1,9 +1,3 @@
variable "consul_address" {
type = string
description = "Full address of Consul instance to get catalog from"
default = "http://127.0.0.1:5400"
}
variable "base_hostname" { variable "base_hostname" {
type = string type = string
description = "Base hostname to serve content from" description = "Base hostname to serve content from"
@ -12,7 +6,7 @@ variable "base_hostname" {
job "traefik" { job "traefik" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "system" type = "service"
priority = 100 priority = 100
constraint { constraint {
@ -20,20 +14,37 @@ job "traefik" {
value = "ingress" value = "ingress"
} }
constraint {
distinct_hosts = true
}
update { update {
max_parallel = 1 max_parallel = 1
# canary = 1
# auto_promote = true
auto_revert = true auto_revert = true
} }
group "traefik" { group "traefik" {
count = 1
network { network {
port "web" { port "web" {
static = 80 static = 80
} }
port "websecure" { port "websecure" {
static = 443 static = 443
} }
port "syslog" {
static = 514
}
}
ephemeral_disk {
migrate = true
sticky = true
} }
service { service {
@ -107,11 +118,19 @@ job "traefik" {
[entryPoints.websecure.http.tls] [entryPoints.websecure.http.tls]
<< if keyExists "traefik/acme/email" ->> << if keyExists "traefik/acme/email" ->>
certResolver = "letsEncrypt" certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
<< end ->> << end ->>
[entryPoints.metrics] [entryPoints.metrics]
address = ":8989" address = ":8989"
[entryPoints.syslogtcp]
address = ":514"
[entryPoints.syslogudp]
address = ":514/udp"
[api] [api]
dashboard = true dashboard = true
@ -138,7 +157,8 @@ job "traefik" {
<< if keyExists "traefik/acme/email" ->> << if keyExists "traefik/acme/email" ->>
[certificatesResolvers.letsEncrypt.acme] [certificatesResolvers.letsEncrypt.acme]
email = "<< key "traefik/acme/email" >>" email = "<< key "traefik/acme/email" >>"
storage = "acme.json" # Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge] [certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare" provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"] resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
@ -200,6 +220,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
{{ with service "vault" -}} {{ with service "vault" -}}
[http.services.vault] [http.services.vault]
[http.services.vault.loadBalancer] [http.services.vault.loadBalancer]
[http.services.vault.loadBalancer.sticky.cookie]
{{ range . -}} {{ range . -}}
[[http.services.vault.loadBalancer.servers]] [[http.services.vault.loadBalancer.servers]]
url = "http://{{ .Address }}:{{ .Port }}" url = "http://{{ .Address }}:{{ .Port }}"
@ -210,6 +231,43 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
change_mode = "noop" change_mode = "noop"
} }
template {
data = <<EOH
{{ with service "syslogng" -}}
[tcp.routers]
[tcp.routers.syslogtcp]
entryPoints = ["syslogtcp"]
service = "syslogngtcp"
rule = "HostSNI(`*`)"
[tcp.services]
[tcp.services.syslogngtcp]
[tcp.services.syslogngtcp.loadBalancer]
{{ range . -}}
[[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{ end }}
{{ with service "syslogng" -}}
[udp.routers]
[udp.routers.syslogudp]
entryPoints = ["syslogudp"]
service = "syslogngudp"
[udp.services]
[udp.services.syslogngudp]
[udp.services.syslogngudp.loadBalancer]
{{ range . -}}
[[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{ end }}
EOH
destination = "local/config/conf/route-syslog-ng.toml"
change_mode = "noop"
}
template { template {
data = <<EOH data = <<EOH
[http.middlewares] [http.middlewares]
@ -237,7 +295,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
resources { resources {
cpu = 100 cpu = 100
memory = 100 memory = 100
memory_max = 200 memory_max = 500
} }
} }
} }
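Commit 24c04f4217 relies on the sticky.cookie table added to the Consul-templated dynamic config above. Once rendered, the Vault service section would look roughly like the following sketch (server addresses are placeholders); Traefik then issues a session cookie so each client keeps hitting the same Vault instance instead of bouncing between backends serving different dynamic asset paths.

[http.services.vault]
  [http.services.vault.loadBalancer]
    [http.services.vault.loadBalancer.sticky.cookie]
    # optional cookie settings such as name, secure, and httpOnly are not set in the diff above
    [[http.services.vault.loadBalancer.servers]]
      url = "http://192.168.2.10:8200"
    [[http.services.vault.loadBalancer.servers]]
      url = "http://192.168.2.11:8200"
    [[http.services.vault.loadBalancer.servers]]
      url = "http://192.168.2.12:8200"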

View File

@ -0,0 +1,16 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
"base_hostname" = "${var.base_hostname}",
}
}
jobspec = file("${path.module}/traefik.nomad")
}

nomad/core/vars.tf (new file, 5 lines)
View File

@ -0,0 +1,5 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}

View File

@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}


@ -25,7 +25,6 @@ job "adminer" {
upstreams {
destination_name = "mysql-server"
- # TODO: how do I get these to not bind to the host eth0 address
local_bind_port = 4040
}


@ -1,6 +1,7 @@
job "mysql-server" {
datacenters = ["dc1"]
type = "service"
+ priority = 80
group "mysql-server" {
count = 1


@ -42,6 +42,12 @@ resource "consul_config_entry" "mysql_intents" {
Precedence = 9
Type = "consul"
},
+ {
+ Action = "allow"
+ Name = "grafana"
+ Precedence = 9
+ Type = "consul"
+ },
]
})
}
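For context, the hunk above only appends a new source; the whole entry follows the usual consul_config_entry shape. A rough sketch of the full resource, with everything other than the grafana entry assumed rather than confirmed by this diff:

resource "consul_config_entry" "mysql_intents" {
  name = "mysql-server"   # assumed from the adminer upstream shown earlier
  kind = "service-intentions"

  config_json = jsonencode({
    Sources = [
      {
        Action     = "allow"
        Name       = "adminer"   # assumed pre-existing source
        Precedence = 9
        Type       = "consul"
      },
      {
        Action     = "allow"
        Name       = "grafana"   # added in this change
        Precedence = 9
        Type       = "consul"
      },
    ]
  })
}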


@ -1,6 +1,7 @@
job "redis" {
datacenters = ["dc1"]
type = "service"
+ priority = 60
group "cache" {
count = 1
@ -52,7 +53,7 @@ job "redis" {
config {
image = "redis:6"
- args = ["redis-server", "--save", "60", "1", "--loglevel", "warning"]
+ args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
ports = ["main"]
}


@ -1,363 +0,0 @@
job "metrics" {
datacenters = ["dc1"]
type = "system"
group "cadvisor" {
network {
mode = "bridge"
port "cadvisor" {
to = 8080
}
port "expose" {
}
port "cadvisor_envoy_metrics" {
to = 9102
}
}
service {
name = "cadvisor"
port = "cadvisor"
meta {
metrics_addr = "${NOMAD_ADDR_expose}"
envoy_metrics_addr = "${NOMAD_ADDR_cadvisor_envoy_metrics}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 8080
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 8080
listener_port = "expose"
}
}
config {
envoy_prometheus_bind_addr = "0.0.0.0:9102"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "cadvisor"
interval = "10s"
timeout = "10s"
}
}
task "cadvisor" {
driver = "docker"
config {
# image = "iamthefij/cadvisor:0.37.5"
image = "gcr.io/cadvisor/cadvisor:v0.39.3"
args = ["--docker_only=true"]
ports = ["cadvisor"]
# volumes = [
# "/:/rootfs:ro",
# "/var/run:/var/run:rw",
# "/sys:/sys:ro",
# "/var/lib/docker/:/var/lib/docker:ro",
# "/cgroup:/cgroup:ro",
# "/etc/machine-id:/etc/machine-id:ro",
# ]
mount {
type = "bind"
source = "/"
target = "/rootfs"
readonly = true
}
mount {
type = "bind"
source = "/var/run"
target = "/var/run"
readonly = false
}
mount {
type = "bind"
source = "/sys"
target = "/sys"
readonly = true
}
mount {
type = "bind"
source = "/var/lib/docker"
target = "/var/lib/docker"
readonly = true
}
# mount {
# type = "bind"
# source = "/cgroup"
# target = "/cgroup"
# readonly = true
# }
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
}
resources {
cpu = 50
memory = 100
}
}
}
group "node_exporter" {
network {
mode = "bridge"
port "node_exporter" {
to = 9100
}
}
service {
name = "nodeexporter"
port = "node_exporter"
meta {
metrics_addr = "${NOMAD_ADDR_node_exporter}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 9100
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9100
listener_port = "node_exporter"
}
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "node_exporter"
interval = "10s"
timeout = "10s"
}
}
task "node_exporter" {
driver = "docker"
config {
image = "prom/node-exporter:v1.0.1"
args = ["--path.rootfs", "/host"]
ports = ["node_exporter"]
mount {
type = "bind"
source = "/"
target = "/host"
readonly = true
}
}
resources {
cpu = 50
memory = 50
}
}
}
group "promtail" {
network {
mode = "bridge"
port "promtail" {
to = 9080
}
}
service {
name = "promtail"
port = "promtail"
meta {
metrics_addr = "${NOMAD_ADDR_promtail}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 9080
upstreams {
destination_name = "loki"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "promtail"
interval = "10s"
timeout = "10s"
}
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:2.2.1"
args = ["-config.file=/etc/promtail/promtail.yml"]
ports = ["promtail"]
# Mount config
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
# Bind mount host machine-id and log directories
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
mount {
type = "bind"
source = "/var/log/journal/"
target = "/var/log/journal/"
readonly = true
}
mount {
type = "bind"
source = "/run/log/journal/"
target = "/run/log/journal/"
readonly = true
}
# mount {
# type = "bind"
# source = "/var/log/audit"
# target = "/var/log/audit"
# readonly = true
# }
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
# loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
scrape_configs:
- job_name: journal
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: systemd-journal
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: unit
- source_labels: ['__journal__hostname']
target_label: hostname
- source_labels: ['__journal__transport']
target_label: journal_transport
# Docker log labels
- source_labels: ['__journal_syslog_identifier']
target_label: syslog_identifier
- source_labels: ['__journal_image_name']
target_label: docker_image_name
- source_labels: ['__journal_container_name']
target_label: docker_container_name
- source_labels: ['__journal_container_id']
target_label: docker_container_id
- source_labels: ['__journal_com_docker_compose_project']
target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service
EOF
destination = "local/promtail.yml"
}
resources {
cpu = 50
memory = 20
}
}
}
}
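The deleted system job ran cadvisor, node_exporter, and promtail on every client. If node and allocation metrics are instead scraped from the Nomad agents directly, the agents need Prometheus telemetry enabled; a minimal agent-config sketch (this stanza is not part of the diff and is shown only as an assumption about how the replacement works):

# Nomad agent config sketch (e.g. a telemetry.hcl dropped into the agent config dir)
telemetry {
  prometheus_metrics         = true
  publish_allocation_metrics = true
  publish_node_metrics       = true
}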


@ -1,38 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,12 +1,3 @@
- terraform {
- required_providers {
- vault = {
- source = "hashicorp/vault"
- version = "3.8.0"
- }
- }
- }
# Configure Consul provider
provider "consul" {
address = var.consul_address
@ -33,15 +24,23 @@ locals {
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
- # Configure the Nomad provider
- provider "nomad" {
- address = local.nomad_node_address
- secret_id = var.nomad_secret_id
- region = "global"
- }
# Configure the Vault provider
provider "vault" {
- address = local.vault_node_address
+ address = length(var.vault_address) == 0 ? local.vault_node_address : var.vault_address
token = var.vault_token
}
+ # Something that should exist in a post bootstrap module; right now this module includes bootstrapping,
+ # which requires Admin
+ # data "vault_nomad_access_token" "deploy" {
+ # backend = "nomad"
+ # role = "deploy"
+ # }
+ # Configure the Nomad provider
+ provider "nomad" {
+ address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
+ secret_id = var.nomad_secret_id
+ # secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
+ region = "global"
+ }

nomad/recover-consul.yaml (new file)

@ -0,0 +1,45 @@
---
- name: Recover Consul
hosts: consul_instances
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true
- name: Get node-id
slurp:
src: /opt/consul/node-id
register: consul_node_id
become: true
- name: Node Id
debug:
msg: "node_id: {{ consul_node_id.content }}"
- name: Address
debug:
msg: "address: {{ ansible_default_ipv4.address }}"
- name: Save
copy:
dest: "/opt/consul/raft/peers.json"
content: |
[
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
{
"id": "{{ hostvars[host].consul_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Consul
systemd:
name: consul
state: restarted
become: true

nomad/recover-nomad.yaml (new file)

@ -0,0 +1,45 @@
---
- name: Recover Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id
register: nomad_node_id
become: true
- name: Node Id
debug:
msg: "node_id: {{ nomad_node_id.content }}"
- name: Address
debug:
msg: "address: {{ ansible_default_ipv4.address }}"
- name: Save
copy:
dest: /var/nomad/server/raft/peers.json
content: |
[
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
{
"id": "{{ hostvars[host].nomad_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Nomad
systemd:
name: nomad
state: restarted
become: true


@ -7,7 +7,7 @@ roles:
- src: https://github.com/IamTheFij/ansible-nomad.git
name: ansible-nomad
scm: git
- version: install-repo
+ version: my-main
- src: https://github.com/ansible-community/ansible-vault.git
name: ansible-vault
scm: git


@ -6,6 +6,7 @@
# sticky_disk = bool # sticky_disk = bool
# args = json(list[str]) # args = json(list[str])
# resources = dict(cpu = int, mem = int) # resources = dict(cpu = int, mem = int)
# env = json(dict(str: any))
# templates = json(list(dict( # templates = json(list(dict(
# data = str, # data = str,
# dest = str, # dest = str,
@ -14,9 +15,19 @@
# left_delimiter = str, # left_delimiter = str,
# right_delimiter = str, # right_delimiter = str,
# ))) # )))
# host_volumes = json(list(dict(
# name = str,
# dest = str,
# read_only = bool,
# )))
# healthcheck = "/" # healthcheck = "/"
# upstreams = json(list(dict(
# destination_name = str,
# local_bind_port = int
# )))
# mysql = bool # mysql = bool
# redis = bool # redis = bool
# vault = bool
job "[[.name]]" { job "[[.name]]" {
region = "global" region = "global"
datacenters = ["dc1"] datacenters = ["dc1"]
@ -24,17 +35,17 @@ job "[[.name]]" {
type = "service" type = "service"
group "[[.name]]" { group "[[.name]]" {
[[ with .count ]]count = [[ . ]][[end]] [[ with .count ]]count = [[ . ]][[ end ]]
network { network {
mode = "bridge" mode = "bridge"
[[ if not (empty .service_port) ]] [[ if not (empty .service_port) -]]
port "main" { port "main" {
[[ if default false .ingress ]] [[ if default false .ingress -]]
host_network = "loopback" host_network = "loopback"
[[ end ]] [[ end -]]
to = [[.service_port]] to = [[ .service_port ]]
} }
[[ end ]] [[ end -]]
} }
[[ if default false .sticky_disk ]] [[ if default false .sticky_disk ]]
@ -44,6 +55,16 @@ job "[[.name]]" {
} }
[[ end ]] [[ end ]]
[[ with .host_volumes -]]
[[ range $v := . | parseJSON -]]
volume "[[ $v.name ]]" {
type = "host"
read_only = [[ $v.read_only ]]
source = "[[ $v.name ]]"
}
[[ end ]]
[[ end -]]
[[ if not (empty .service_port) ]] [[ if not (empty .service_port) ]]
service { service {
name = "[[.name | replace "_" "-"]]" name = "[[.name | replace "_" "-"]]"
@ -53,19 +74,27 @@ job "[[.name]]" {
connect { connect {
sidecar_service { sidecar_service {
proxy { proxy {
local_service_port = [[.service_port]] local_service_port = [[ .service_port ]]
[[ if default false .mysql ]] [[ if default false .mysql -]]
upstreams { upstreams {
destination_name = "mysql-server" destination_name = "mysql-server"
local_bind_port = 4040 local_bind_port = 4040
} }
[[ end -]] [[ end -]]
[[ if default false .redis ]] [[ if default false .redis -]]
upstreams { upstreams {
destination_name = "redis" destination_name = "redis"
local_bind_port = 6379 local_bind_port = 6379
} }
[[ end -]]
[[ with .upstreams -]]
[[range $u := . | parseJSON -]]
upstreams {
destination_name = "[[ $u.destination_name ]]"
local_bind_port = [[ $u.local_bind_port ]]
}
[[ end ]] [[ end ]]
[[ end -]]
} }
} }
@ -93,10 +122,13 @@ job "[[.name]]" {
[[ if default false .ingress -]] [[ if default false .ingress -]]
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.[[.name]].entryPoints=websecure", "traefik.http.routers.[[.name]].entryPoints=websecure",
[[ if not (empty .ingress_rule) -]]
"traefik.http.routers.[[.name]].rule=[[.ingress_rule]]",
[[ end -]]
[[ end -]] [[ end -]]
] ]
} }
[[ end ]] [[ end -]]
task "[[.name]]" { task "[[.name]]" {
driver = "docker" driver = "docker"
@ -105,32 +137,51 @@ job "[[.name]]" {
image = "[[.image]]" image = "[[.image]]"
[[ if not (empty .service_port) -]] [[ if not (empty .service_port) -]]
ports = ["main"] ports = ["main"]
[[- end ]] [[ end -]]
[[ if not (empty .args) -]] [[ if not (empty .args) -]]
args = ["[[ .args | parseJSON | join `", "` ]]"] args = ["[[ .args | parseJSON | join `", "` ]]"]
[[- end ]] [[ end -]]
[[ with .templates]] [[ with .templates -]]
[[ range $t := . | parseJSON ]] [[ range $t := . | parseJSON -]]
mount { mount {
type = "bind" type = "bind"
target = "[[ $t.dest ]]" target = "[[ $t.dest ]]"
source = "local/[[ $t.dest ]]" source = "local/[[ $t.dest ]]"
} }
[[ end ]] [[ end ]]
[[ end ]] [[ end -]]
} }
[[ if default false .vault -]]
vault {
policies = [
"access-tables",
"nomad-task",
]
}
[[ end -]]
[[ with .env -]] [[ with .env -]]
env = { env = {
[[- range $k, $v := . ]] [[ range $k, $v := . | parseJSON -]]
"[[$k]]" = "[[$v]]" "[[$k]]" = "[[$v]]"
[[- end ]] [[ end -]]
}
[[ end -]]
[[ with .host_volumes -]]
[[ range $v := . | parseJSON -]]
volume_mount {
volume = "[[ $v.name ]]"
destination = "[[ $v.dest ]]"
read_only = [[ $v.read_only ]]
} }
[[ end ]] [[ end ]]
[[ end -]]
[[ with .templates ]] [[ with .templates -]]
[[ range $t := . | parseJSON ]] [[ range $t := . | parseJSON -]]
template { template {
data = <<EOF data = <<EOF
[[ $t.data ]] [[ $t.data ]]
@ -142,15 +193,15 @@ EOF
[[ with $t.change_signal ]]change_signal = "[[ . ]]"[[ end -]] [[ with $t.change_signal ]]change_signal = "[[ . ]]"[[ end -]]
[[ with $t.env ]]env = [[ . ]][[ end ]] [[ with $t.env ]]env = [[ . ]][[ end ]]
} }
[[ end ]] [[ end -]]
[[ end ]] [[ end -]]
[[ with .resources ]] [[ with .resources -]]
resources { resources {
cpu = [[ .cpu ]] cpu = [[ .cpu ]]
memory = [[ .memory ]] memory = [[ .memory ]]
} }
[[ end ]] [[ end -]]
} }
} }
} }
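The new template variables (env, host_volumes, upstreams, vault, ingress_rule) are all passed as JSON strings and parsed with parseJSON inside the template. A minimal sketch of how a caller might feed them in, modeled on the commented-out levant module blocks later in this change (the module path and all values here are illustrative assumptions, not taken from the repo):

module "example_service" {
  source = "./levant"                 # assumed module path, as in the commented examples

  template_path = "service.nomad"
  variables = {
    name         = "example"
    image        = "nginx:1.23"
    service_port = 8080
    ingress      = true
    ingress_rule = "Host(`example.dev.homelab`)"

    env = jsonencode({
      TZ = "America/Los_Angeles"
    })

    upstreams = jsonencode([
      { destination_name = "mysql-server", local_bind_port = 4040 },
    ])

    host_volumes = jsonencode([
      { name = "example-data", dest = "/data", read_only = false },
    ])
  }
}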


@ -1,26 +1,5 @@
- module "nextcloud" {
- source = "./nextcloud"
- depends_on = [module.databases]
- }
- module "backups" {
- source = "./backups"
- depends_on = [module.databases]
- }
- module "media" {
- source = "./media"
- }
- resource "nomad_job" "whoami" {
- hcl2 {
- enabled = true
- vars = {
- "count" = "${2 * length(data.consul_service.nomad.service)}",
- }
- }
- jobspec = file("${path.module}/whoami.nomad")
- }
+ module "services" {
+ source = "./services"
+ depends_on = [module.databases, module.core]
+ }


@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.16.2"
hashes = [
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
]
}


@ -2,26 +2,28 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" { provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0" version = "2.16.2"
hashes = [ hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=", "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c", "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931", "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624", "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc", "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1", "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c", "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d", "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1", "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba", "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719", "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c", "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
] ]
} }
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16" version = "1.4.16"
hashes = [ hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=", "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e", "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572", "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",


@ -1,15 +1,8 @@
variable "nextcloud_backup" { job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
variable "consul_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup-oneoff-n2" {
datacenters = ["dc1"] datacenters = ["dc1"]
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch" type = "batch"
parameterized { parameterized {
@ -22,14 +15,21 @@ job "backup-oneoff-n2" {
task = "backup" task = "backup"
snapshot = "latest" snapshot = "latest"
} }
%{ endif ~}
%{ if batch_node == null ~}
constraint { constraint {
attribute = "${node.unique.name}" attribute = "$${node.unique.name}"
# Only node with a backup job so far operator = "set_contains_any"
# Remove when backing up all nodes # Only deploy to nodes running tasks to backup
value = "n2" value = "n1,n2"
} }
%{ else ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
}
%{ endif ~}
group "backup" { group "backup" {
@ -43,11 +43,12 @@ job "backup-oneoff-n2" {
volume "all-volumes" { volume "all-volumes" {
type = "host" type = "host"
read_only = true read_only = false
source = "all-volumes" source = "all-volumes"
} }
service { service {
name = "backups"
port = "metrics" port = "metrics"
# Add connect to mysql # Add connect to mysql
@ -76,7 +77,7 @@ job "backup-oneoff-n2" {
} }
meta { meta {
metrics_addr = "${NOMAD_ADDR_metrics}" metrics_addr = "$${NOMAD_ADDR_metrics}"
} }
} }
@ -86,17 +87,18 @@ job "backup-oneoff-n2" {
volume_mount { volume_mount {
volume = "all-volumes" volume = "all-volumes"
destination = "/data" destination = "/data"
read_only = true read_only = false
} }
config { config {
image = "iamthefij/resticscheduler" image = "iamthefij/resticscheduler"
ports = ["metrics"] ports = ["metrics"]
args = [ args = [
%{ if batch_node != null ~}
"-once", "-once",
"-${NOMAD_META_task}", "-$${NOMAD_META_task}",
"${NOMAD_META_job_name}", "$${NOMAD_META_job_name}",
# TODO: add restore arg here %{ endif ~}
"/jobs/node-jobs.hcl", "/jobs/node-jobs.hcl",
] ]
@ -115,21 +117,26 @@ job "backup-oneoff-n2" {
} }
env = { env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}" "MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}" "MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
} }
template { template {
# Probably want to use database credentials that have access to dump all tables # Probably want to use database credentials that have access to dump all tables
data = <<EOF data = <<EOF
{{ with secret "kv/data/nextcloud" }} {{ with secret "kv/data/nextcloud" -}}
MYSQL_DATABASE={{ .Data.data.db_name }} MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }} MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }} MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }} {{ end -}}
{{ with secret "kv/data/backups" }} {{ with secret "kv/data/backups" -}}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }} BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
{{ end }} RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
{{ end -}}
EOF EOF
destination = "secrets/db.env" destination = "secrets/db.env"
env = true env = true
@ -148,16 +155,38 @@ CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
# Build jobs based on node # Build jobs based on node
data = <<EOF data = <<EOF
# Current node is {{ env "node.unique.name" }} # Current node is {{ env "node.unique.name" }}
# Consul backup below?
{{ if eq (env "node.unique.name") "n2" -}} {{ if eq (env "node.unique.name") "n2" -}}
# Consul backup # Consul backup
${var.consul_backup} ${file("${module_path}/jobs/consul.hcl")}
{{ end -}} {{ end -}}
{{ range service "nextcloud" }}
{{ range service "nextcloud" -}}
# Nextcloud .Node {{ .Node }} # Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") }} {{ if eq .Node (env "node.unique.name") -}}
${var.nextcloud_backup} ${file("${module_path}/jobs/nextcloud.hcl")}
{{ end }}{{ end }} {{ end -}}
{{ end -}}
{{ range service "lldap" -}}
# Lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{ end -}}
{{ end -}}
{{ range service "sonarr" -}}
# Sonarr .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/sonarr.hcl")}
{{ end -}}
{{ end -}}
{{ range service "nzbget" -}}
# Nzbget .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nzbget.hcl")}
{{ end -}}
{{ end -}}
EOF EOF
destination = "jobs/node-jobs.hcl" destination = "jobs/node-jobs.hcl"
} }


@ -0,0 +1,24 @@
resource "nomad_job" "backups" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = "${path.module}",
batch_node = null,
})
}
# Get Nomad clients from Consul
# data "consul_service" "nomad" {
# name = "nomad-client"
# }
resource "nomad_job" "backups-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = "${path.module}",
batch_node = each.key,
})
}
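The for_each above is hard-coded to n1 and n2, with the Consul-driven lookup left commented out. If that query is brought back, the dynamic version would look roughly like this sketch (resource name is hypothetical; the data source mirrors the commented block above):

# Sketch: derive the oneoff backup jobs from Nomad clients registered in Consul
data "consul_service" "nomad" {
  name = "nomad-client"
}

resource "nomad_job" "backups-oneoff-dynamic" {
  for_each = toset([
    for node in data.consul_service.nomad.service :
    node.node_name
  ])

  jobspec = templatefile("${path.module}/backup.nomad", {
    module_path = "${path.module}",
    batch_node  = each.key,
  })
}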


@ -1,23 +1,11 @@
job "Consul" {
- schedule = "* * * * *"
+ schedule = "0 * * * *"
config {
- # TODO: Backup to a meaningful location, this is just for testing
- repo = "/local/repo"
- # Read from secret file
+ repo = "rclone::ftp,env_auth:/nomad/consul"
passphrase = env("BACKUP_PASSPHRASE")
}
- # Remove when using a proper backup destination
- task "Create dir for repo" {
- pre_script {
- on_backup = "echo 'Backing up something'"
- }
- pre_script {
- on_backup = "mkdir -p /local/repo"
- }
- }
task "Use consul snapshots" {
pre_script {
on_backup = "mkdir -p /local/consul"


@ -0,0 +1,27 @@
job "lldap" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
}
# sqlite "Backup database" {
# path = "/data/lldap/users.db"
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
# dump_to = "/data/lldap/users.db.bak"
# }
backup {
paths = ["/data/lldap"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}


@ -1,23 +1,11 @@
job "Nextcloud" {
- schedule = "* * * * *"
+ schedule = "0 * * * *"
config {
- # TODO: Backup to a meaningful location, this is just for testing
- repo = "/local/repo"
- # Read from secret file
+ repo = "rclone::ftp,env_auth:/nomad/nextcloud"
passphrase = env("BACKUP_PASSPHRASE")
}
- # Remove when using a proper backup destination
- task "Create dir for repo" {
- pre_script {
- on_backup = "echo 'Backing up something'"
- }
- pre_script {
- on_backup = "mkdir -p /local/repo"
- }
- }
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")


@ -0,0 +1,21 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/data/nzbget"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}


@ -0,0 +1,27 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
}
# sqlite "Backup database" {
# path = "/data/lldap/users.db"
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
# dump_to = "/data/lldap/users.db.bak"
# }
backup {
paths = ["/data/sonarr"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}


@ -0,0 +1,29 @@
resource "consul_service" "homeassistant" {
name = "hass"
node = consul_node.homeassistant.name
port = 8123
tags = [
"traefik.enable=true",
"traefik.consulcatalog.connect=false",
"traefik.http.routers.hass.entryPoints=websecure",
]
check {
check_id = "homeassistant:hass"
status = "passing"
name = "Home Assistant Health Check"
http = "192.168.3.65:8123"
interval = "30s"
timeout = "10s"
}
}
resource "consul_node" "homeassistant" {
name = "homeassistant"
address = "192.168.3.65"
meta = {
"external-node" = "true"
"external-probe" = "true"
}
}

nomad/services/ip-dvr.nomad (new file)

@ -0,0 +1,201 @@
job "ipdvr" {
region = "global"
datacenters = ["dc1"]
type = "service"
group "nzbget" {
network {
mode = "bridge"
port "main" {
host_network = "loopback"
to = 6789
}
}
volume "nzbget-data" {
type = "host"
read_only = false
source = "nzbget-data"
}
volume "download" {
type = "host"
read_only = false
source = "download"
}
service {
name = "nzbget"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 6789
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
# check {
# type = "http"
# path = "/"
# port = "main"
# interval = "10s"
# timeout = "10s"
# }
tags = [
"traefik.enable=true",
"traefik.http.routers.nzbget.entryPoints=websecure",
]
}
task "nzbget" {
driver = "docker"
config {
image = "linuxserver/nzbget"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "nzbget-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "download"
destination = "/downloads"
read_only = false
}
resources {
cpu = 200
memory = 200
memory_max = 500
}
}
}
group "sonarr" {
network {
mode = "bridge"
port "main" {
host_network = "loopback"
to = 8989
}
}
volume "sonarr-data" {
type = "host"
read_only = false
source = "sonarr-data"
}
volume "tv-sonarr" {
type = "host"
read_only = false
source = "tv-sonarr"
}
volume "download" {
type = "host"
read_only = false
source = "download"
}
service {
name = "sonarr"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 8989
upstreams {
destination_name = "nzbget"
local_bind_port = 6789
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
# check {
# type = "http"
# path = "/"
# port = "main"
# interval = "10s"
# timeout = "10s"
# }
tags = [
"traefik.enable=true",
"traefik.http.routers.sonarr.entryPoints=websecure",
]
}
task "sonarr" {
driver = "docker"
config {
image = "linuxserver/sonarr"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sonarr-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "tv-sonarr"
destination = "/tv"
read_only = false
}
volume_mount {
volume = "download"
destination = "/downloads"
read_only = false
}
resources {
cpu = 100
memory = 300
memory_max = 500
}
}
}
}

nomad/services/main.tf (new file)

@ -0,0 +1,109 @@
# module "nextcloud" {
# source = "./nextcloud"
#
# depends_on = [module.databases]
# }
module "backups" {
source = "./backups"
# In parent module
# depends_on = [module.databases]
}
module "media" {
source = "./media"
}
resource "nomad_job" "whoami" {
hcl2 {
enabled = true
vars = {
"count" = 1,
# "count" = "${2 * length(data.consul_service.nomad.service)}",
}
}
jobspec = file("${path.module}/whoami.nomad")
}
resource "nomad_job" "ipdvr" {
jobspec = file("${path.module}/ip-dvr.nomad")
}
resource "consul_config_entry" "nzbget_intents" {
depends_on = [nomad_job.ipdvr]
name = "nzbget"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "sonarr"
Precedence = 9
Type = "consul"
},
]
})
}
# module "nzbget" {
# source "./levant"
#
# template_path = "service.nomad"
# variables = {
# name = "nzbget"
# image = "linuxserver/nzbget"
# service_port = 6789
# ingress = true
# env = jsonencode({
# PGID = 100
# PUID = 1001
# TZ = "America/Los_Angeles"
# })
# host_volumes = jsonencode([
# {
# name = "download"
# dest = "/srv/volumes/download"
# read_only = false
# },
# ])
# }
# }
#
# module "sonarr" {
# source = "./levant"
#
# template_path = "service.nomad"
# variables = {
# name = "sonarr"
# image = "linuxserver/sonarr"
# service_port = 8989
# ingress = true
# env = jsonencode({
# PGID = 100
# PUID = 1001
# TZ = "America/Los_Angeles"
#
# })
# host_volumes = jsonencode([
# {
# name = "sonarr-data"
# dest = "/config"
# read_only = false
# },
# {
# name = "tv-sonarr"
# dest = "/srv/volumes/media-write/TV Shows"
# read_only = false
# },
# {
# name = "download"
# dest = "/srv/volumes/download"
# read_only = false
# },
# ])
# }
# }


@ -4,6 +4,7 @@
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
+ "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",


@ -4,6 +4,7 @@
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
+ "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",


@ -3,34 +3,34 @@
hosts: consul_instances hosts: consul_instances
any_errors_fatal: true any_errors_fatal: true
vars_files:
- consul_values.yml
roles: roles:
- role: ansible-consul - role: ansible-consul
vars: vars:
consul_version: "1.12.3-1" consul_version: "1.13.3-1"
consul_install_upgrade: true consul_install_upgrade: true
consul_install_from_repo: true consul_install_from_repo: true
consul_os_repo_prerequisites: [] consul_os_repo_prerequisites: []
consul_node_role: server consul_node_role: server
consul_bootstrap_expect: true consul_bootstrap_expect: true
consul_bootstrap_expect_value: "{{ [(play_hosts | length), 3] | min }}"
consul_user: consul consul_user: consul
consul_manage_user: true consul_manage_user: true
consul_group: bin consul_group: bin
consul_manage_group: true consul_manage_group: true
consul_architecture_map:
x86_64: amd64
armhfv6: arm
armv7l: arm
# consul_tls_enable: true # consul_tls_enable: true
consul_connect_enabled: true consul_connect_enabled: true
consul_ports_grpc: 8502 consul_ports_grpc: 8502
consul_client_address: "0.0.0.0" consul_client_address: "0.0.0.0"
consul_acl_enabled: true # Autopilot
consul_acl_default_policy: "deny" consul_autopilot_enable: true
consul_autopilot_cleanup_dead_Servers: true
# Enable metrics # Enable metrics
consul_config_custom: consul_config_custom:
@ -52,7 +52,6 @@
become: true become: true
tasks: tasks:
# Bootstrap ACLs
- name: Start Consul - name: Start Consul
systemd: systemd:
state: started state: started
@ -61,33 +60,23 @@
# If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something # If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something
# pointing to 127.0.0.1 and possibly restart Docker and Nomad # pointing to 127.0.0.1 and possibly restart Docker and Nomad
- name: Update resolv.conf
- name: Boostrap ACLs lineinfile:
command: dest: /etc/resolv.conf
argv: create: true
- "consul" line: "nameserver 127.0.0.1"
- "acl" become: true
- "bootstrap"
- "-format=json"
run_once: true
ignore_errors: true
register: bootstrap_result
- name: Save bootstrap result
copy:
content: "{{ bootstrap_result.stdout }}"
dest: "./consul_bootstrap.json"
when: bootstrap_result is succeeded
delegate_to: localhost
run_once: true
- name: Setup Vault cluster - name: Setup Vault cluster
hosts: vault_instances hosts: vault_instances
vars_files:
- ./vault_hashi_vault_values.yml
roles: roles:
- name: ansible-vault - name: ansible-vault
vars: vars:
vault_version: 1.10.0 vault_version: 1.12.0-1
vault_install_hashi_repo: true vault_install_hashi_repo: true
vault_harden_file_perms: true vault_harden_file_perms: true
vault_bin_path: /usr/bin vault_bin_path: /usr/bin
@ -104,7 +93,6 @@
status_code: 200, 429, 472, 473, 501, 503 status_code: 200, 429, 472, 473, 501, 503
body_format: json body_format: json
return_content: true return_content: true
run_once: true
register: vault_status register: vault_status
- name: Initialize Vault - name: Initialize Vault
@ -157,28 +145,24 @@
- unseal_keys_hex is defined - unseal_keys_hex is defined
- vault_status.json["sealed"] - vault_status.json["sealed"]
- name: Bootstrap Vault secrets - name: Install Docker
delegate_to: localhost hosts: nomad_instances
run_once: true become: true
block: vars:
- name: Install hvac docker_architecture_map:
pip: x86_64: amd64
name: hvac armv7l: armhf
extra_args: --index-url https://pypi.org/simple aarch64: arm64
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
# TODO: This fails on first run because `root_token` isn't found tasks:
# Fails after taht too because the kv/ space has not been created yet either! Oh noes! - name: Remove snapd
# Maybe move data bootstrapping to after the cluster is bootstrapped package:
- name: Write values name: snapd
no_log: true state: absent
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
# Not on Ubuntu 20.04 # Not on Ubuntu 20.04
# - name: Install Podman # - name: Install Podman
@ -217,15 +201,29 @@
state: mounted state: mounted
fstype: nfs4 fstype: nfs4
- name: Install Docker - name: Create Media Library RW NFS mount
hosts: nomad_instances ansible.posix.mount:
become: true src: 192.168.2.10:/Multimedia
vars: path: /srv/volumes/media-write
deb_arch: "{% if ansible_architecture == 'x86_64' %}amd64{% elif ansible_architecture == 'armv7l' %}armhf{% endif %}" opts: proto=tcp,port=2049,rw
docker_apt_arch: "{{ deb_arch }}" state: mounted
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}" fstype: nfs4
roles:
- geerlingguy.docker - name: Create Download RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Download
path: /srv/volumes/download
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Container NAS RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Container
path: /srv/volumes/container
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Build Nomad cluster - name: Build Nomad cluster
hosts: nomad_instances hosts: nomad_instances
@ -237,45 +235,60 @@
- name: motioneye-recordings - name: motioneye-recordings
path: /srv/volumes/motioneye-recordings path: /srv/volumes/motioneye-recordings
owner: "root" owner: "root"
group: "bin" group: "root"
mode: "0755" mode: "0755"
read_only: false read_only: false
- name: media-read - name: media-read
path: /srv/volumes/media-read path: /srv/volumes/media-write
read_only: true
- name: media-write
path: /srv/volumes/media-write
owner: "root" owner: "root"
group: "root" group: "root"
mode: "0777" mode: "0755"
read_only: true read_only: false
- name: tv-sonarr
path: "/srv/volumes/media-write/TV Shows"
owner: 1001
group: 100
mode: "0755"
read_only: false
- name: download
path: /srv/volumes/download
owner: 1001
group: 100
mode: "0755"
read_only: false
- name: nzbget-data
path: /srv/volumes/container/nzbget/config
read_only: false
- name: gitea-data
path: /srv/volumes/container/gitea
read_only: false
- name: all-volumes - name: all-volumes
path: /srv/volumes path: /srv/volumes
owner: "root" owner: "root"
group: "root" group: "root"
mode: "0777" mode: "0755"
read_only: false read_only: false
roles: roles:
- name: ansible-nomad - name: ansible-nomad
vars: vars:
nomad_version: "1.3.2-1" nomad_version: "1.4.1-1"
nomad_install_remotely: true
nomad_install_upgrade: true nomad_install_upgrade: true
nomad_allow_purge_config: true nomad_allow_purge_config: true
nomad_meta:
# There are issues with v1.23.0 on arm64
connect.sidecar_image: envoyproxy/envoy:v1.23.1
# Where nomad gets installed to # Where nomad gets installed to
nomad_bin_dir: /usr/bin nomad_bin_dir: /usr/bin
nomad_install_from_repo: true nomad_install_from_repo: true
# nomad_user: root nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
# nomad_manage_user: true nomad_raft_protocol: 3
# nomad_group: bin
# nomad_manage_group: true
# Properly map install arch
nomad_architecture_map:
x86_64: amd64
armhfv6: arm
armv7l: arm
nomad_autopilot: true nomad_autopilot: true
nomad_encrypt_enable: true nomad_encrypt_enable: true
# nomad_use_consul: true # nomad_use_consul: true
@ -321,9 +334,6 @@
# Create networks for binding task ports # Create networks for binding task ports
nomad_host_networks: nomad_host_networks:
# - name: public
# interface: eth0
# reserved_ports: "22"
- name: nomad-bridge - name: nomad-bridge
interface: nomad interface: nomad
reserved_ports: "22" reserved_ports: "22"
@ -335,11 +345,19 @@
nomad_acl_enabled: true nomad_acl_enabled: true
# Enable vault integration # Enable vault integration
# HACK: Only talk to local Vault for now because it doesn't have HTTPS
# TODO: Would be really great to have this over https and point to vault.consul.service
# nomad_vault_address: "https://vault.service.consul:8200"
# Right now, each node only talks to it's local Vault, so if that node is rebooted and
# that vault is sealed, it will not have access to vault. This is a problem if a node
# must reboot.
nomad_vault_address: "http://127.0.0.1:8200"
# TODO: This fails on first run because the Nomad-Vault integration can't be set up # TODO: This fails on first run because the Nomad-Vault integration can't be set up
# until Nomad has started. Could maybe figure out if ACLs have been set up and leave # until Nomad has started. Could maybe figure out if ACLs have been set up and leave
# these out until the later play # these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
nomad_vault_address: "http://vault.service.consul:8200" # befor Nomad is set up
nomad_vault_create_from_role: "nomad-cluster" nomad_vault_create_from_role: "nomad-cluster"
# TODO: (security) Probably want to restict this to a narrower scoped token
nomad_vault_enabled: "{{ root_token is defined }}" nomad_vault_enabled: "{{ root_token is defined }}"
nomad_vault_token: "{{ root_token | default('') }}" nomad_vault_token: "{{ root_token | default('') }}"
@ -347,26 +365,36 @@
ui: ui:
enabled: true enabled: true
consul: consul:
ui_url: "http://{{ ansible_hostname }}:8500/ui" ui_url: "https://{{ ansible_hostname }}:8500/ui"
vault: vault:
ui_url: "http://{{ ansible_hostname }}:8200/ui" ui_url: "https://{{ ansible_hostname }}:8200/ui"
consul: consul:
tags: tags:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.consulcatalog.connect=true" - "traefik.consulcatalog.connect=true"
- "traefik.http.routers.nomadclient.entrypoints=websecure" - "traefik.http.routers.nomadclient.entrypoints=websecure"
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_instances
tasks: tasks:
- name: Start Nomad - name: Start Nomad
systemd: systemd:
state: started state: started
name: nomad name: nomad
- name: Bootstrap Nomad ACLs - name: Nomad API reachable?
hosts: nomad_instances uri:
url: "http://127.0.0.1:4646/v1/status/leader"
method: GET
status_code: 200
register: nomad_check_result
retries: 6
until: nomad_check_result is succeeded
delay: 10
changed_when: false
run_once: true
tasks:
# Need to wait until nomad is running
- name: Bootstrap ACLs - name: Bootstrap ACLs
command: command:
argv: argv:
@ -386,16 +414,6 @@
delegate_to: localhost delegate_to: localhost
run_once: true run_once: true
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
run_once: true
register: policies
- name: Read secret - name: Read secret
command: command:
argv: argv:
@ -409,9 +427,35 @@
changed_when: false changed_when: false
register: read_secretid register: read_secretid
- name: Enable service scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
register: policies
- name: Copy policy - name: Copy policy
copy: copy:
src: ./acls/nomad-anon-bootstrap.hcl src: ./acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}" delegate_to: "{{ play_hosts[0] }}"
register: anon_policy register: anon_policy
@ -424,7 +468,7 @@
- acl - acl
- policy - policy
- apply - apply
- -description="Anon RW" - -description="Anon read only"
- anonymous - anonymous
- /tmp/anonymous.policy.hcl - /tmp/anonymous.policy.hcl
environment: environment:
@ -443,3 +487,13 @@
nomad_secret_id: "{{ read_secretid.stdout }}" nomad_secret_id: "{{ read_secretid.stdout }}"
delegate_to: localhost delegate_to: localhost
run_once: true run_once: true
notify:
- Restart Nomad
handlers:
- name: Restart Nomad
systemd:
state: restarted
name: nomad
retries: 6
delay: 5


@ -1,38 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,29 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
variable "consul_address" {
type = string
description = "address of consul server for dynamic routes"
}
data "consul_nodes" "all-nodes" {
query_options {
datacenter = "dc1"
}
}
resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
# "consul_address" = "${var.consul_address}",
"consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
"base_hostname" = "${var.base_hostname}",
}
}
jobspec = file("${path.module}/traefik.nomad")
}

nomad/unseal-vault.yml (new file)

@ -0,0 +1,27 @@
---
- name: Unseal Vault
hosts: vault_instances
tasks:
- name: Get Vault status
uri:
url: http://127.0.0.1:8200/v1/sys/health
method: GET
status_code: 200, 429, 472, 473, 501, 503
body_format: json
return_content: true
register: vault_status
- name: Unseal Vault
no_log: true
command:
argv:
- "vault"
- "operator"
- "unseal"
- "-address=http://127.0.0.1:8200/"
- "{{ item }}"
loop: "{{ unseal_keys_hex }}"
when:
- unseal_keys_hex is defined
- vault_status.json["sealed"]


@ -3,6 +3,16 @@ variable "consul_address" {
default = "http://n1.thefij:8500"
}
+ variable "vault_address" {
+ type = string
+ default = ""
+ }
+ variable "nomad_address" {
+ type = string
+ default = ""
+ }
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"


@ -1,5 +0,0 @@
resource "vault_mount" "kv" {
path = "kv"
type = "kv-v2"
description = "Catch all kv mount"
}