Compare commits

...

218 Commits

Author SHA1 Message Date
5e0393823d Tell service action scheduler to use task socket 2024-10-02 12:07:37 -07:00
78320a8ea4 Upgrade unifi traffic routes to fix bug 2024-10-02 11:29:24 -07:00
03ce47320c Add scheduled import for photoprism 2024-10-02 11:28:24 -07:00
8641bd50e1 Add ability to add actions and schedule them for services 2024-10-02 11:26:57 -07:00
9d7a8029c1 Add additional ansible dependency 2024-10-02 11:25:34 -07:00
9c2bce3fab Clean up lego 2024-10-02 11:25:19 -07:00
c248edfc52 Spit out log message when retrying db connection on bootstrap 2024-10-02 11:24:58 -07:00
1bc46d957a Upgrade nomad 2024-10-02 11:23:39 -07:00
8866434590 Increase db bootstrap timeouts
Prevents service flapping while database is trying to recover
2024-08-30 11:30:02 -07:00
1c783dbdfe Make sure blocky bootstrap never fails
We want to make sure the blocky task is always started, even if mysql isn't reached
2024-08-30 11:27:28 -07:00
f5a180f019 Add dummy stunnel server to blocky
Hopefully this keeps the stunnel instance from failing if mysql and redis
are both unavailable
2024-08-30 11:13:53 -07:00
98c547ebdf Add authelia bypass for some favicons 2024-08-30 11:12:56 -07:00
fc5bce0757 Clean python requirements 2024-08-30 11:12:07 -07:00
2a58439ab5 Simplify passing blocky config to nomad 2024-08-30 11:09:59 -07:00
79648879ab Use new oidc module for setting up oidc with Authelia 2024-08-29 14:07:49 -07:00
9a76c9efef Upgrade nomad 2024-08-29 13:56:54 -07:00
52b0ec3bb6 Add oidc client module 2024-08-29 13:56:41 -07:00
cf43d32d06 Remove n2 host 2024-08-29 13:51:18 -07:00
03dc79c895 Update unifi-traffic-route 2024-08-27 15:29:24 -07:00
fafbb246ff Add oidc to photoprism 2024-08-27 15:28:37 -07:00
e99c5272cc Bump db mem 2024-08-21 20:03:08 -07:00
64b58230e6 Fix sonos list regex 2024-08-21 20:02:56 -07:00
95ca3f40d0 Use vars for external services 2024-08-21 20:02:18 -07:00
534bad2a03 Update nomad 2024-08-21 19:58:35 -07:00
58c483a051 Add overflow volume for some TV 2024-08-21 19:58:22 -07:00
84d7a68088 Make name of anon policy consistent between ansible and tf 2024-08-21 19:58:21 -07:00
8e8dbc3e65 Clean up IoT block lists 2024-07-17 20:08:38 -07:00
77c7c6b36c Disable authelia SMTP check to make sure it starts 2024-07-17 20:06:40 -07:00
505a6b5e8d Bump ytdlw to include deadlock fix 2024-06-27 09:36:57 -07:00
1307864afc Update ytdl to use a known system user 2024-06-26 13:32:54 -07:00
c5743a2578 Add ability to set docker user for services 2024-06-26 13:30:50 -07:00
bd67c60575 Make more things auto-revert if they are broken 2024-06-26 13:29:55 -07:00
3e8c03904d Fix block list for smarttvs in blocky config 2024-06-26 13:29:16 -07:00
408f526239 Remove ipv6 from blocky 2024-06-26 13:28:58 -07:00
c478ba4278 Auto refresh blocky lists when templates change 2024-06-26 13:28:45 -07:00
9ee660cb6d Pin stunnel image to speed deployments
This will prevent redownload
2024-06-26 13:27:41 -07:00
2235a00f3b Refactor blocky lists to a new nomad var space to make them easier to manage 2024-06-24 17:04:03 -07:00
1f8014e740 Fix smarttv block lists to get from a domain that works
Also hard codes regex lists because they were formatted for PiHole and
not Blocky.
2024-06-24 13:54:30 -07:00
bc620987b7 Move from Gitea to Nomad Vars for custom block and allow
DNS doesn't route to internal addresses for git.thefij.rocks because
list lookups use bootstrap DNS servers, which don't know about it.
2024-06-24 13:53:34 -07:00
7477cb7227 Upgrade blocky and init fast 2024-06-24 13:53:13 -07:00
6906623fda Add ytdl-web 2024-06-13 16:23:55 -07:00
5547241d11 Upgrade photoprism 2024-06-08 13:40:40 -07:00
81093cedfb Increase memory for syslog jobs
They were getting OOM killed
2024-06-08 13:36:23 -07:00
7b41d29eb8 Add health checks and restarts to prometheus 2024-05-30 15:01:42 -07:00
90b7740343 Move Blocky and Exporters away from system to service jobs
This is because service jobs do not get rescheduled when allocs fail
2024-05-30 11:41:40 -07:00
e88c7c250d Bump nomad to 1.8 2024-05-30 11:40:58 -07:00
ed83ab0382 Remove qnomad due to disk errors 2024-05-30 11:40:28 -07:00
3cfbda7a27 Stop using diun for nomad fixers 2024-05-28 12:18:27 -07:00
85c626c96f Use Nomad task socket from Traefik 2024-05-28 12:00:13 -07:00
634d63c26c Stop diun for traffic routes
This was causing a check for each set of dead tasks
2024-05-28 11:45:30 -07:00
205388f283 Update traefik to v3 using canary 2024-05-28 11:43:46 -07:00
bdfde48bec Add some more monitors to nomad minitor 2024-05-06 14:29:17 -07:00
9af55580e7 Update diun config to read from task socket 2024-05-01 10:18:54 -07:00
b9c35bf18f Add ability to set task identities for service module 2024-05-01 10:18:24 -07:00
e7f740a2d9 Add languagetool server 2024-05-01 09:43:28 -07:00
57efee14e9 Update Ansible inventory to split node roles
Splits servers and clients to their own groups so that plays can target
specific roles.

Prior, everything was "both", but I want to add another server for
recovery purposes without hosting containers on it.
2024-05-01 09:40:21 -07:00
c711c25737 Always use CF for dns when renewing lego certs
Makes it more resilient if my servers are down, but also cuts out a hop
because CF is the nameserver as well.
2024-04-27 19:33:10 -07:00
24122c2a3e Split fixers to their own groups
Allow them to deploy as different allocs on different hosts
2024-04-22 09:07:03 -07:00
13121862ec Add new host on qnap nas 2024-04-22 09:06:33 -07:00
28da3f425b Move nomad default interface to host vars 2024-04-22 09:06:11 -07:00
2d59886378 Update diun to include ability to read nomad socket 2024-04-17 10:46:28 -07:00
da0f52dab3 Improve change detection for cluster bootstrap 2024-04-17 10:46:10 -07:00
beac302a53 Upgrade nomad to 1.7.6 2024-04-17 10:45:27 -07:00
5edcb86e7e Remove traefik grafana dashboard
Now in data backups rather than git.
2024-03-26 14:56:14 -07:00
3dcd4c44b3 Tune memory after reviewing grafana 2024-03-26 09:48:31 -07:00
e6653f6495 Migrate sonarr to postgresql
And increase postgresql memory to accommodate
2024-03-25 16:05:58 -07:00
a9a919b8f2 Increase priority for services with higher resource needs
Photoprism requires lots of memory and Sonarr a specific volume
2024-03-22 21:09:19 -07:00
cc66bfdbcb Update photoprism 2024-03-22 21:07:55 -07:00
b02050112e Tune some service memory 2024-03-22 21:07:07 -08:00
d5c2a0d185 Use default diun for syslogng 2024-03-22 21:05:53 -07:00
6a3ae49d8e Update terraform modules 2024-03-11 22:02:07 -07:00
75ee09d7e6 Remove bazarr
Plex does this automatically now
2024-02-20 10:13:40 -08:00
8b90aa0d74 Add 1.1.1.1 dns back to blocky for better resilience 2024-02-20 10:10:41 -08:00
62e120ce51 Add radarr 2024-02-20 10:09:48 -08:00
5fb510202d Fix indent for Authelia rules 2024-02-20 10:05:25 -08:00
64a085ef80 Restart failing services
Restart services that fail checks
2024-02-18 07:49:16 -08:00
f2f415aeac Fix traefik metrics 2024-02-18 07:47:31 -08:00
bb291b1f01 Move databases to their own tf files and improve first start 2024-02-13 12:05:55 -08:00
056eac976c lldap: Make it work on first bootstrap
Can't use the job id for creating the variables and permissions because we end up
with circular dependencies. The job won't return until it's successful in Nomad and it won't
start in Nomad without access to variables.
2024-02-13 12:05:21 -08:00
198f96f3f7 Add back other traefik ports and metrics 2024-02-13 12:03:03 -08:00
6b5adbdf39 Remove 404 block list 2024-02-13 12:02:35 -08:00
77ef4b4167 Use quad9 encrypted dns 2024-02-13 12:02:14 -08:00
b35b8cecd5 Blocky: Remove mysql and redis configs from stunnel if server isn't found 2024-02-13 12:01:45 -08:00
b9dfeff6d8 Have blocky use router for upstream in nomad 2024-02-13 12:01:08 -08:00
2ff954b4b5 Bump nomad 2024-02-13 12:00:43 -08:00
2528dafcc6 Make nomad restart playbook more resilient 2024-02-13 12:00:24 -08:00
0e168376b8 Add terraform destroy to makefile 2024-02-13 11:59:47 -08:00
a16dc204fe Run dummy backup more frequently to make graphs easier to read 2024-01-24 20:10:14 -08:00
93d340c182 Make sure gitea ingress uses system wesher config
It was previously always using wesher, regardless of the system config
2024-01-23 12:09:59 -08:00
37ee67b2e6 fix: Add job_id output to services
This should be earlier in history
2024-01-23 12:09:29 -08:00
35dfeb3093 Add service healthchecks 2024-01-23 12:08:47 -08:00
0a2eace3dd Fix lldap secrets 2024-01-23 12:07:42 -08:00
6fe1b200f2 Update loki 2024-01-23 12:06:25 -08:00
c5d5ab42b8 Add some nomad actions for backups to test different formatting 2024-01-23 12:05:56 -08:00
efe7864cc9 Delay shutdowns of backup jobs to reduce killing those in progress 2024-01-23 12:05:20 -08:00
9ba74ce698 Use return vars for service acl 2024-01-16 14:16:21 -08:00
4fe3d46d5f Add external service acls for authelia 2024-01-16 14:15:56 -08:00
cf8bde7920 Add external traefik routes to nomad vars 2024-01-16 14:15:18 -08:00
bc87688f1a Move ldap secrets 2024-01-16 14:14:39 -08:00
3491c1f679 Add refresh make target 2024-01-16 14:04:44 -08:00
7b019e0787 Add auth to sonarr 2024-01-08 14:57:06 -08:00
0f19e2433f Upgrade sonarr to version 4 2024-01-08 10:14:53 -08:00
c01d45c7a2 Upgrade grafana to version 10 2024-01-08 10:11:42 -08:00
d07afe2319 Update traffic routes to handle null IPs
E.g. 0.0.0.0 for blocked domains
2024-01-06 16:23:45 -08:00
b025e4a87e Add repo unlock via Nomad action to backups 2024-01-06 16:22:20 -08:00
9be16fef1f Upgrade traefik to 2.10 2024-01-04 13:25:10 -08:00
c26da678b3 Small traefik cleanup
Remove fallback DNS since we only care about internal DNS

Use loopback address for accessing Nomad UI
2024-01-04 13:24:49 -08:00
6b9533ef71 Run traefik on multiple hosts 2024-01-04 13:24:15 -08:00
0bd995ec2b Traefik: Use nomad vars for dynamic certs
Rather than having Traefik handle cert fetching, instead
it is delegated to a separate job so that multiple Traefik
instances can share certs
2024-01-04 10:55:49 -08:00
0d340f3349 Periodic job to renew lego certs and store them in Nomad Variables
This will allow multiple instances of Traefik to serve certs.
2024-01-04 10:53:25 -08:00
bcad131aa7 Use job id for lldap acls 2024-01-04 10:53:23 -08:00
cda2842f8f Switch to image containing stunnel
Rather than installing stunnel on container startup, this uses an image with
stunnel pre-installed. This avoids issues with DNS breaking
the container on startup.
2024-01-03 13:50:49 -08:00
9544222961 Bump to 1.7.2 2023-12-29 20:47:58 -08:00
7bc4ae1f8b Reserve node memory to reduce OOM kills 2023-12-29 07:36:23 -08:00
1a3c096b65 Fix nomad fixers 2023-12-29 07:35:07 -08:00
25e533287d Fix gitea backups syntax 2023-12-18 12:23:21 -08:00
7e87002be2 Nomad 1.7 2023-12-18 12:22:19 -08:00
ab6906e989 Gitea backups 2023-12-10 20:39:33 -08:00
ca55209316 Fix blocky redis 2023-12-10 20:37:43 -08:00
1b49f015c5 Update blocky config to v0.22 schema 2023-11-30 14:00:27 -08:00
eb25138675 Remove defunct lists 2023-11-30 13:39:22 -08:00
69a0f760b4 Remove defunct lists 2023-11-30 13:39:01 -08:00
3fcedaddb7 Remove todo from traefik 2023-11-30 13:26:15 -08:00
bb34b434b8 Add custom blocklists hosted on my gitea server 2023-11-30 13:23:54 -08:00
36cdb8f41b Add Gitea
Currently it won't auto bootstrap auth. A command has to be executed once
to get it added to the database.
2023-11-30 13:22:54 -08:00
cdd4e9b5d5 Fix custom ports for services 2023-11-30 13:22:53 -08:00
f06e90ab0d Remove hw transcode constraints from photoprism 2023-11-30 10:05:39 -08:00
2d733b278c Make backup jobids static so they work on clean deploy 2023-11-30 09:55:08 -08:00
b218633c2c Add scheduled job to update UniFi Traffic Routes
Because I use a custom DNS server, Domain based routing rules
don't work. This instead resolves the domains and then adds
the IP addresses to the rules.
2023-11-20 10:37:03 -08:00
e21ec11eb5 Fix grafana
Broken template
2023-11-20 10:35:49 -08:00
d6f9c2a7e4 Fix diun include tags variable
This fixes a configuration bug causing diun to include all tags by default.
2023-11-16 12:22:44 -08:00
891cfa7b2d Update blocky dashboard to not use consul tags 2023-11-16 12:21:59 -08:00
c11b8e157b Fix grafana dashboard provisioning
A path mismatch existed after migrating to alloc storage
2023-11-16 12:21:40 -08:00
0d208b7394 Add dummy backup job to keep backup task running on all hosts
Otherwise, if a client is not running any stateful services, the task
will fail and Nomad will eventually stop retrying. If a service gets
relocated to the host, the task is not restarted. This makes sure the
task will cover moved services and makes it easier to determine that
backups are healthy.
2023-11-16 12:19:19 -08:00
9b347880cc Ignore some regeneratable files in backups
Reduces the size of Lidarr and Photoprism backups
2023-11-07 16:50:02 -08:00
a0185d9642 Bump resources for backups to allow more memory
Was getting OOM killed
2023-11-07 16:49:27 -08:00
f2f5f4407c Add TZ to restic-scheduler 2023-11-07 16:48:57 -08:00
0b3d3caff6 Update restic-scheduler to fix index out of range error 2023-11-07 16:48:41 -08:00
52abd94a38 Use minio as restic repo rather than sftp
I've been getting a lot of restic lock errors using sftp
2023-11-06 16:35:13 -08:00
0391fd95ad Allow fixers to actually fix things 2023-11-06 14:41:54 -08:00
df1ae60936 Add change_script to service module 2023-11-06 14:41:13 -08:00
a2d33ac309 Add proxmox influxdb to Grafana 2023-10-23 13:10:01 -07:00
1b48892172 Add read-only implementation of fixers as scheduled batches 2023-10-23 12:59:45 -07:00
48a48bb080 Move sonarr and nzbget to their own jobs 2023-10-23 12:59:11 -07:00
bd2c5ca3db Put restic cache on ephemeral disk 2023-10-23 08:54:05 -07:00
48074bdc39 Fix log from orphaned services to not say deleting when it's in dry run. 2023-10-19 12:08:28 -07:00
2f3fc87f12 Consider job status when detecting missing services
Prevents false alarms and attempts to restart failed or stopped allocs.
2023-10-19 12:07:57 -07:00
369802cacc Bump Postgres memory to 500mb 2023-10-19 12:07:14 -07:00
0c3f98d5c3 Pin Grafana to amd64 since renderer requires it.
This could be mitigated by moving the renderer to another task group.
2023-10-19 12:06:47 -07:00
b97cfb68ad Minor Nomad bump to 1.6.2 2023-10-19 12:05:52 -07:00
d83591cfd4 Make diun disk sticky 2023-09-27 21:34:14 -07:00
f80eccdfa2 Update diun to use global defaults 2023-09-27 21:33:55 -07:00
5fe30d005b Update missing services script to restart allocs 2023-09-27 21:30:48 -07:00
ad439d48f3 Add waiting for loki and prom dependencies in core 2023-09-27 21:30:22 -07:00
df4737655a Remount network shares when recovering cluster 2023-09-27 21:26:44 -07:00
b29f405090 Bump prometheus version and pin blocky 2023-09-18 21:58:43 -07:00
cf90248430 Remove old Consul and Vault references 2023-09-17 21:43:04 -07:00
72a108753b Bump lldap to latest release 2023-09-14 12:14:07 -07:00
8dd00c1249 Move authelia and grafana to shared smtp secrets 2023-08-29 15:11:40 -07:00
edeb6cf444 lldap: access shared smtp secrets 2023-08-29 14:56:06 -07:00
2bd939e651 Remove deprecated hcl2 enabled 2023-08-29 13:02:04 -07:00
ea8ca478c6 Fix blocky acl 2023-08-29 12:59:14 -07:00
769965ae6b Fix lidarr to use postgres db creds 2023-08-29 12:52:30 -07:00
f5898b0283 Add workload ACL management for mysql and postgres access
Allows required jobs to access shared secrets and auto-generates PSKs
for stunnel.

Currently supporting MySQL, Postgres, and LDAP.
2023-08-29 12:48:48 -07:00
cdba6aa24f Add script to check for missing services 2023-08-28 13:54:24 -07:00
50447b9a7d Add a read-only check for orphaned service script 2023-08-28 13:54:06 -07:00
08f92f8ba5 Add new backup and orphaned service scripts 2023-08-26 15:59:50 -07:00
b13e31d9f8 Move scripts to subdir 2023-08-26 15:58:57 -07:00
a57e87d21f Fix var path for adhoc backup jobs 2023-08-26 15:56:21 -07:00
2efc7f8c2f Add ability to set job meta for services 2023-08-24 15:41:18 -07:00
7aa5b800ba Clean up finally rendered templates for services 2023-08-24 15:37:42 -07:00
013dd8248b Make base_hostname more configurable 2023-08-24 15:03:36 -07:00
f6dd3f4284 Clean up root module and move lldap to databases 2023-08-24 13:52:03 -07:00
4a7bff7611 Move metrics out of a module and into core 2023-08-24 13:00:36 -07:00
b4a6901687 Bump up sonarr memory a little more 2023-08-24 12:51:32 -07:00
d5078b24da Refactor use of wesher to be behind a variable toggle
Occasionally I run into issues with Wesher. This makes it easier to
disable use of Wesher by setting TF_VAR_use_wesher to false.
2023-08-24 12:51:32 -07:00
e2c35a82a9 Fix grafana config loading
For some reason, the env variable method stopped working.
2023-08-24 11:59:10 -07:00
50507b2aa8 Fix eol on readme 2023-08-24 11:53:54 -07:00
0cfd052a6e Move backup module to a module under root 2023-08-24 11:53:08 -07:00
1715b58ca9 Pin image versions for more critical services 2023-08-24 11:39:00 -07:00
440c0b0c4c Move redis commander 2023-08-24 11:37:13 -07:00
47da10febf Remove unused caddy module 2023-08-24 11:11:36 -07:00
0a8395e8fa Add bazarr configs 2023-08-21 10:54:57 -07:00
05c367e531 Try to format time zones from minitor 2023-08-18 12:15:34 -07:00
32b5e420bc Fix incorrect README
Referenced Ansible when it should have been Nomad variables.
2023-08-13 20:54:46 -07:00
9d8aa49b31 Improve README.md documentation
Give more details in README.md
2023-08-13 20:53:11 -07:00
dcb9f7d26f Add log alert back to minitor 2023-08-11 07:08:00 -04:00
64d61d69a1 Fix Plex minitor check 2023-08-11 06:52:51 -04:00
92e42b5605 Update and add time format to minitor 2023-08-11 03:49:55 -07:00
b62029be0a Lower photoprism resources to make it easier to schedule 2023-08-10 15:56:12 -07:00
ddeb8fffbc Move services to their own tf files for easier locating 2023-08-07 11:37:19 -07:00
41c9d3d6f6 Adjust down default service stunnel sidecar resources
Keep photoprism and lidarr, database heavy tools, at the same level
2023-08-07 11:31:35 -07:00
02959c7673 Update minitor with new apps and global options 2023-08-03 14:39:50 -07:00
3e0533954f Add authelia backup job 2023-08-03 10:36:42 -07:00
df5ed00f05 Update backup job config to iterate over job files
This prevents new job files from being added without being included
2023-08-03 10:33:11 -07:00
0a5480129e Remove nextcloud since it's not used 2023-08-03 10:32:34 -07:00
946873e5ad Make sure existing jobs are loaded 2023-08-03 10:21:34 -07:00
d8f8884cb8 Improve backup job configuration
Add lidarr; fix hosts to 'nomad' since host names change with containers and nodes
and don't make a difference; make most jobs daily; exclude sonarr and lidarr zip backups
from restic backups.
2023-08-03 10:11:57 -07:00
e63327428f Update backups to v0.2.0 to include postgres 2023-08-03 09:53:31 -07:00
a2d24e03cd Deploy adhoc backups to all hosts 2023-08-03 09:53:03 -07:00
f66bd95fbb Run backup batches on all hosts 2023-08-02 21:33:16 -07:00
fa0da05343 Change authelia port to avoid conflict with prometheus 2023-08-02 21:31:08 -07:00
2844493fa1 Increase pgsql and lidarr memory to prevent crashes on library 2023-07-31 10:43:51 -07:00
4b94f66786 Increase Traefik memory 2023-07-31 10:43:03 -07:00
c2632ee7c0 Mount pgdata to proper path 2023-07-26 23:24:09 -07:00
f333031c25 bootstrap blocky with stunnel 2023-07-26 23:23:23 -07:00
254ef01de9 Increase lidarr resources 2023-07-26 15:30:05 -07:00
b5ab68e6f3 Fix postgres host volume 2023-07-26 15:29:52 -07:00
8f6bed297c Upgrade to nomad 1.6.1 2023-07-26 15:29:39 -07:00
882b93a4c5 Abort nomad recovery if any hosts fail 2023-07-26 15:27:46 -07:00
0d37652447 Add pre-commit hook to make sure variable sample is up to date 2023-07-25 16:57:44 -07:00
a52c2bc6c7 Run pre-commit on everything 2023-07-25 16:57:44 -07:00
70098930f8 Add lidarr 2023-07-25 16:57:33 -07:00
e7c985d276 Allow adminer to connect to postgres 2023-07-25 16:57:33 -07:00
0ea9da3a53 Update postgres bootstrap allowing multiple databases 2023-07-25 16:57:33 -07:00
ac29343d96 Add postgres stunnel and service bootstrap 2023-07-25 10:59:33 -07:00
f8478ae6c9 Service Template: Make sure stunnel is there for ldap 2023-07-25 10:30:28 -07:00
f0d31ff13c Move stunnel psks to a more restrictive path 2023-07-25 10:16:30 -07:00
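Commits 0d340f3349 and 0bd995ec2b above move TLS certificates into Nomad Variables so that multiple Traefik instances can share them. The traefik job itself is not part of this diff, but a minimal sketch of the consuming template, assuming a variable path of nomad/jobs/traefik/cert and a field named cert_pem (both assumptions, not values from the repo), would look roughly like this:

# Sketch only: read a shared certificate from Nomad Variables inside the traefik task.
# The variable path and field name are assumptions, not taken from this repo.
template {
  data        = <<EOF
{{- with nomadVar "nomad/jobs/traefik/cert" -}}
{{ .cert_pem }}
{{- end -}}
EOF
  destination = "secrets/wildcard.pem"
  change_mode = "noop"
}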
138 changed files with 4743 additions and 3333 deletions

View File

@ -18,9 +18,17 @@ repos:
- id: check-added-large-files
- id: check-merge-conflict
- id: end-of-file-fixer
exclude: "^ansible_playbooks/vars/nomad_vars.sample.yml$"
- id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets
rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets-baseline']
- repo: local
hooks:
- id: variable-sample
name: generate variable sample file
language: system
entry: bash -c 'venv/bin/python scripts/nomad_vars.py print > ./ansible_playbooks/vars/nomad_vars.sample.yml'
types: [file]

View File

@ -75,6 +75,10 @@
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_baseline_file",
"filename": ".secrets-baseline"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
@ -114,89 +118,74 @@
}
],
"results": {
"ansible_playbooks/vars/vault_hashi_vault_values.example.yml": [
{
"type": "Secret Keyword",
"filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "f2baa52d02ca888455ce47823f47bf372d5eecb3",
"is_verified": false,
"line_number": 8
},
{
"type": "Secret Keyword",
"filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "18960546905b75c869e7de63961dc185f9a0a7c9",
"is_verified": false,
"line_number": 10
},
{
"type": "Secret Keyword",
"filename": "ansible_playbooks/vars/vault_hashi_vault_values.example.yml",
"hashed_secret": "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
"is_verified": false,
"line_number": 22
}
],
"core/authelia.yml": [
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "7cb6efb98ba5972a9b5090dc2e517fe14d12cb04",
"is_verified": false,
"line_number": 54
"line_number": 54,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/authelia.yml",
"hashed_secret": "a32b08d97b1615dc27f58b6b17f67624c04e2c4f",
"is_verified": false,
"line_number": 191
"line_number": 201,
"is_secret": false
}
],
"core/metrics/grafana/grafana.ini": [
"core/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78
"line_number": 78,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109
"line_number": 109,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151
"line_number": 151,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154
"line_number": 154,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239
"line_number": 239,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "core/metrics/grafana/grafana.ini",
"filename": "core/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252
"line_number": 252,
"is_secret": false
}
]
},
"generated_at": "2023-07-11T19:43:38Z"
"generated_at": "2024-08-30T18:12:43Z"
}

View File

@ -2,20 +2,39 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.20"
version = "2.2.0"
hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
"zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
"zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
"zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
"zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
"zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
"zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
"zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
"zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
"zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
"zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
"zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

View File

@ -62,7 +62,7 @@ ansible-cluster: $(VENV) ansible_galaxy
bootstrap-values: $(VENV)
env NOMAD_ADDR=http://192.168.2.101:4646 \
NOMAD_TOKEN=$(shell jq -r .SecretID nomad_bootstrap.json) \
$(VENV)/bin/python ./nomad_vars.py
$(VENV)/bin/python ./scripts/nomad_vars.py
.PHONY: recover-nomad
recover-nomad: $(VENV)
@ -87,6 +87,16 @@ apply:
-auto-approve \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: refresh
refresh:
@terraform refresh \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: destroy
destroy:
@terraform destroy \
-var "nomad_secret_id=$(shell jq -r .SecretID nomad_bootstrap.json)" \
.PHONY: clean
clean:
env VIRTUAL_ENV=$(VENV) $(VENV)/bin/ansible-playbook -vv \

View File

@ -2,6 +2,8 @@
My configuration for creating my home Nomad cluster and deploying services to it.
This repo is not designed as general purpose templates, but rather to fit my specific needs. That said, I have made an effort for things to be as useful as possible for someone wanting to use or modify this.
## Running
make all
@ -12,10 +14,33 @@ Both Ansible and Terraform are used as part of this configuration. All hosts mus
To begin, Ansible runs a playbook to setup the cluster. This includes installing Nomad, bootstrapping the cluster and ACLs, setting up NFS shares, creating Nomad Host Volumes, and setting up Wesher as a Wireguard mesh between hosts.
After this is complete, Ansible variables must be set for services to access and configure correctly. This depends on variables to be set based on the sample file.
After this is complete, Nomad variables must be set for services to access and configure correctly. This depends on variables to be set based on the sample file.
Finally, the Terraform configuration can be applied setting up all services deployed on the cluster.
The configuration of new services is intended to be as templated as possible and to avoid requiring changes in multiple places. For example, most services are configured with a template that provides reverse proxy, DNS records, database tunnels, database bootstrapping, metrics scraping, and authentication. The only real exception is backups, which requires a distinct job file, for now.
## What does it do?
* Nomad cluster for scheduling and configuring all services
* Blocky DNS servers with integrated ad blocking. This also provides service discovery
* Prometheus with autodiscovery of service metrics
* Loki and Promtail aggregating logs
* Minitor for service availability checks
* Grafana providing dashboards, alerting, and log searching
* Photoprism for photo management
* Remote and shared volumes over NFS
* Authelia for OIDC and Proxy based authentication with 2FA
* Sonarr and Lidarr for multimedia management
* Automated block based backups using Restic
## Step by step
1. Update hosts in `ansible_playbooks/ansible_hosts.yml`
2. Update `ansible_playbook/setup-cluster.yml`
1. Update backup DNS server
2. Update NFS shares from NAS
3. Update volumes to make sure they are valid paths
3. Create `ansible_playbooks/vars/nomad_vars.yml` based on the sample file. TODO: This is quite specific and probably impossible without more documentation
4. Run `make all`
5. Update your network DNS settings to use the new servers' IP addresses
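The service template mentioned above (reverse proxy, DNS records, database tunnels, bootstrapping, metrics scraping, and authentication) is not shown in this diff. A purely hypothetical invocation, with the module path and every argument assumed rather than taken from the repo, might look like:

# Hypothetical sketch only; module path and arguments are assumptions, not from this repo.
module "example_service" {
  source    = "./service"
  name      = "example"
  image     = "registry.example.com/example:1.0"
  ingress   = true   # reverse proxy route and DNS record
  use_mysql = true   # database bootstrap plus stunnel PSK
  oidc      = true   # register an Authelia OIDC client
}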

View File

@ -1,6 +1,6 @@
resource "nomad_acl_policy" "anon_policy" {
name = "anonymous"
description = "Anon RO"
description = "Anon read only"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
}
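The nomad-anon-policy.hcl file referenced here is not part of this diff; a read-only anonymous policy of the kind described is typically along these lines (a sketch, not the repo's actual file):

# Sketch of a read-only anonymous ACL policy; not the repo's actual file.
namespace "default" {
  policy = "read"
}

node {
  policy = "read"
}

agent {
  policy = "read"
}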

View File

@ -1,68 +1,72 @@
---
all:
children:
servers:
hosts:
n1.thefij:
nomad_node_role: both
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
n2.thefij:
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_node_class: ingress
nomad_node_role: both
nomad_unique_host_volumes:
- name: nextcloud-data
path: /srv/volumes/nextcloud
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: gitea-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: sonarr-data
path: /srv/volumes/sonarr
owner: "root"
group: "bin"
mode: "0755"
read_only: false
pi4:
nomad_node_role: both
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
hosts:
n1.thefij:
nomad_node_class: ingress
nomad_reserved_memory: 1024
# nomad_meta:
# hw_transcode.device: /dev/dri
# hw_transcode.type: intel
nfs_mounts:
- src: 10.50.250.2:/srv/volumes
path: /srv/volumes/moxy
opts: proto=tcp,rw
nomad_unique_host_volumes:
- name: mysql-data
path: /srv/volumes/mysql
owner: "999"
group: "100"
mode: "0755"
read_only: false
- name: postgres-data
path: /srv/volumes/postgres
owner: "999"
group: "999"
mode: "0755"
read_only: false
# n2.thefij:
# nomad_node_class: ingress
# nomad_reserved_memory: 1024
# nfs_mounts:
# - src: 10.50.250.2:/srv/volumes
# path: /srv/volumes/moxy
# opts: proto=tcp,rw
# nomad_unique_host_volumes:
# - name: nextcloud-data
# path: /srv/volumes/nextcloud
# owner: "root"
# group: "bin"
# mode: "0755"
# read_only: false
pi4:
nomad_node_class: ingress
nomad_reserved_memory: 512
nomad_meta:
hw_transcode.device: /dev/video11
hw_transcode.type: raspberry
qnomad.thefij:
ansible_host: 192.168.2.234
nomad_reserved_memory: 1024
# This VM uses a non-standard interface
nomad_network_interface: ens3
consul_instances:
children:
servers: {}
vault_instances:
children:
servers: {}
nomad_instances:
children:
servers: {}
nomad_instances:
vars:
nomad_network_interface: eth0
children:
nomad_servers: {}
nomad_clients: {}
nomad_servers:
hosts:
nonopi.thefij:
ansible_host: 192.168.2.170
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}
nomad_clients:
hosts:
n1.thefij: {}
# n2.thefij: {}
pi4: {}
# qnomad.thefij: {}

View File

@ -1,80 +0,0 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- vars/consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap value values
hosts: vault_instances
gather_facts: false
vars_files:
- vars/vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
- name: Check mount
community.hashi_vault.vault_read:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 10
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"

View File

@ -1,27 +1,5 @@
# Stops Consul, Vault, and Nomad and clears all data from their data dirs
# Stops Nomad and clears all data from its data dirs
---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: vault
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data
hosts: nomad_instances

View File

@ -14,8 +14,14 @@
state: restarted
become: true
- name: Start Docker
systemd:
name: docker
state: started
become: true
- name: Start Nomad
systemd:
name: nomad
state: stopped
state: started
become: true

View File

@ -1,88 +0,0 @@
---
- name: Stop Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Stop Vault
hosts: vault_instances
gather_facts: false
tasks:
- name: Stop Vault
systemd:
name: vault
state: stopped
become: true
- name: Recover Consul
hosts: consul_instances
gather_facts: false
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true
- name: Get node-id
slurp:
src: /opt/consul/node-id
register: consul_node_id
become: true
- name: Node Info
debug:
msg: |
node_id: {{ consul_node_id.content | b64decode }}
address: {{ ansible_default_ipv4.address }}
- name: Save
copy:
dest: "/opt/consul/raft/peers.json"
# I used to have reject('equalto', inventory_hostname) in the loop, but I'm not sure if I should
content: |
[
{% for host in ansible_play_hosts -%}
{
"id": "{{ hostvars[host].consul_node_id.content | b64decode }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Consul
systemd:
name: consul
state: restarted
become: true
- name: Start Vault
hosts: vault_instances
gather_facts: false
tasks:
- name: Start Vault
systemd:
name: vault
state: started
become: true
- name: Start Nomad
hosts: nomad_instances
gather_facts: false
tasks:
- name: Start Nomad
systemd:
name: nomad
state: started
become: true

View File

@ -1,6 +1,7 @@
---
- name: Recover Nomad
hosts: nomad_instances
hosts: nomad_servers
any_errors_fatal: true
tasks:
- name: Stop Nomad
@ -9,6 +10,10 @@
state: stopped
become: true
- name: Remount all shares
command: mount -a
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id

View File

@ -1,6 +1,6 @@
---
- name: Update DNS for bootstrapping with non-Nomad host
hosts: consul_instances
hosts: nomad_instances
become: true
gather_facts: false
vars:
@ -14,7 +14,7 @@
line: "nameserver {{ non_nomad_dns }}"
- name: Install Docker
hosts: nomad_instances
hosts: nomad_clients
become: true
vars:
docker_architecture_map:
@ -44,7 +44,7 @@
# state: present
- name: Create NFS mounts
hosts: nomad_instances
hosts: nomad_clients
become: true
vars:
shared_nfs_mounts:
@ -56,6 +56,10 @@
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Overflow
path: /srv/volumes/nas-overflow
opts: proto=tcp,port=2049,rw
- src: 192.168.2.10:/Photos
path: /srv/volumes/photos
opts: proto=tcp,port=2049,rw
@ -97,6 +101,12 @@
group: "root"
mode: "0755"
read_only: false
- name: media-overflow-write
path: /srv/volumes/nas-overflow/Media
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: media-downloads
path: /srv/volumes/media-write/Downloads
read_only: false
@ -112,6 +122,24 @@
- name: nzbget-config
path: /srv/volumes/nas-container/nzbget
read_only: false
- name: sonarr-config
path: /srv/volumes/nas-container/sonarr
read_only: false
- name: lidarr-config
path: /srv/volumes/nas-container/lidarr
read_only: false
- name: radarr-config
path: /srv/volumes/nas-container/radarr
read_only: false
- name: bazarr-config
path: /srv/volumes/nas-container/bazarr
read_only: false
- name: gitea-data
path: /srv/volumes/nas-container/gitea
read_only: false
- name: ytdl-web
path: /srv/volumes/nas-container/ytdl-web
read_only: false
- name: all-volumes
path: /srv/volumes
owner: "root"
@ -122,9 +150,10 @@
roles:
- name: ansible-nomad
vars:
nomad_version: "1.6.0-1"
nomad_version: "1.8.4-1"
nomad_install_upgrade: true
nomad_allow_purge_config: true
nomad_node_role: "{% if 'nomad_clients' in group_names %}{% if 'nomad_servers' in group_names %}both{% else %}client{% endif %}{% else %}server{% endif %}"
# Where nomad gets installed to
nomad_bin_dir: /usr/bin
@ -178,7 +207,8 @@
nomad_bind_address: 0.0.0.0
# Default interface for binding tasks
nomad_network_interface: eth0
# This is now set at the inventory level
# nomad_network_interface: eth0
# Create networks for binding task ports
nomad_host_networks:
@ -197,7 +227,7 @@
enabled: true
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_instances
hosts: nomad_servers
tasks:
- name: Start Nomad
@ -227,6 +257,7 @@
run_once: true
ignore_errors: true
register: bootstrap_result
changed_when: bootstrap_result is succeeded
- name: Save bootstrap result
copy:
@ -258,13 +289,15 @@
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
register: policies
run_once: true
changed_when: false
- name: Copy policy
copy:
src: ../acls/nomad-anon-policy.hcl
dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
run_once: true
register: anon_policy
@ -275,7 +308,7 @@
- acl
- policy
- apply
- -description="Anon read only"
- -description=Anon read only
- anonymous
- /tmp/anonymous.policy.hcl
environment:
@ -284,6 +317,18 @@
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Read scheduler config
command:
argv:
- nomad
- operator
- scheduler
- get-config
- -json
run_once: true
register: scheduler_config
changed_when: false
- name: Enable service scheduler preemption
command:
argv:
@ -291,12 +336,24 @@
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
delegate_to: "{{ play_hosts[0] }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["ServiceSchedulerEnabled"] is false
- name: Enable system scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
when: (scheduler_config.stdout | from_json)["SchedulerConfig"]["PreemptionConfig"]["SystemSchedulerEnabled"] is false
# - name: Set up Nomad backend and roles in Vault
# community.general.terraform:

View File

@ -1,27 +0,0 @@
---
- name: Unseal Vault
hosts: vault_instances
tasks:
- name: Get Vault status
uri:
url: http://127.0.0.1:8200/v1/sys/health
method: GET
status_code: 200, 429, 472, 473, 501, 503
body_format: json
return_content: true
register: vault_status
- name: Unseal Vault
no_log: true
command:
argv:
- "vault"
- "operator"
- "unseal"
- "-address=http://127.0.0.1:8200/"
- "{{ item }}"
loop: "{{ unseal_keys_hex }}"
when:
- unseal_keys_hex is defined
- vault_status.json["sealed"]

View File

@ -2,56 +2,53 @@ nomad/jobs:
base_hostname: VALUE
db_user_ro: VALUE
ldap_base_dn: VALUE
mysql_root_password: VALUE
notify_email: VALUE
smtp_password: VALUE
smtp_port: VALUE
smtp_server: VALUE
smtp_tls: VALUE
smtp_user: VALUE
nomad/jobs/adminer:
mysql_stunnel_psk: VALUE
nomad/jobs/authelia:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
email_sender: VALUE
jwt_secret: VALUE
ldap_stunnel_psk: VALUE
lldap_admin_password: VALUE
lldap_admin_user: VALUE
mysql_stunnel_psk: VALUE
oidc_clients: VALUE
oidc_hmac_secret: VALUE
oidc_issuer_certificate_chain: VALUE
oidc_issuer_private_key: VALUE
redis_stunnel_psk: VALUE
session_secret: VALUE
storage_encryption_key: VALUE
nomad/jobs/authelia/authelia/stunnel:
redis_stunnel_psk: VALUE
nomad/jobs/backup:
backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n1:
backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-n2:
backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/backup-oneoff-pi4:
backup_passphrase: VALUE
mysql_stunnel_psk: VALUE
nas_ftp_host: VALUE
nas_ftp_pass: VALUE
nas_ftp_user: VALUE
nas_minio_access_key_id: VALUE
nas_minio_secret_access_key: VALUE
nomad/jobs/bazarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/blocky:
db_name: VALUE
db_pass: VALUE
@ -59,7 +56,6 @@ nomad/jobs/blocky:
mappings: VALUE
whitelists_ads: VALUE
nomad/jobs/blocky/blocky/stunnel:
mysql_stunnel_psk: VALUE
redis_stunnel_psk: VALUE
nomad/jobs/ddclient:
domain: VALUE
@ -67,11 +63,13 @@ nomad/jobs/ddclient:
zone: VALUE
nomad/jobs/diun:
slack_hook_url: VALUE
nomad/jobs/gitea:
nomad/jobs/git:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
oidc_secret: VALUE
secret_key: VALUE
smtp_sender: VALUE
nomad/jobs/grafana:
admin_pw: VALUE
alert_email_addresses: VALUE
@ -88,34 +86,19 @@ nomad/jobs/grafana:
slack_hook_url: VALUE
smtp_password: VALUE
smtp_user: VALUE
nomad/jobs/grafana/grafana/stunnel:
mysql_stunnel_psk: VALUE
nomad/jobs/immich:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/ipdvr/bazarr:
nomad/jobs/lego:
acme_email: VALUE
domain_lego_dns: VALUE
usersfile: VALUE
nomad/jobs/lidarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/ipdvr/bazarr/bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/ipdvr/lidarr:
db_pass: VALUE
db_user: VALUE
nomad/jobs/ipdvr/lidarr/bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/ipdvr/radarr:
db_pass: VALUE
db_user: VALUE
nomad/jobs/ipdvr/radarr/bootstrap:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/lldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
@ -123,38 +106,46 @@ nomad/jobs/lldap:
key_seed: VALUE
smtp_from: VALUE
smtp_reply_to: VALUE
nomad/jobs/lldap/lldap/bootstrap:
mysql_root_password: VALUE
nomad/jobs/lldap/lldap/stunnel:
allowed_psks: VALUE
mysql_stunnel_psk: VALUE
nomad/jobs/minitor:
mailgun_api_key: VALUE
nomad/jobs/mysql-server:
allowed_psks: VALUE
root_password: VALUE
mysql_root_password: VALUE
nomad/jobs/photoprism:
admin_password: VALUE
admin_user: VALUE
db_name: VALUE
db_pass: VALUE
db_user: VALUE
mysql_stunnel_psk: VALUE
oidc_secret: VALUE
nomad/jobs/postgres-server:
superuser: VALUE
superuser_pass: VALUE
nomad/jobs/radarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/redis-authelia:
allowed_psks: VALUE
nomad/jobs/redis-blocky:
allowed_psks: VALUE
nomad/jobs/rediscommander:
redis_stunnel_psk: VALUE
nomad/jobs/sonarr:
db_name: VALUE
db_pass: VALUE
db_user: VALUE
nomad/jobs/traefik:
acme_email: VALUE
domain_lego_dns: VALUE
external: VALUE
usersfile: VALUE
nomad/jobs/unifi-traffic-route-ips:
unifi_password: VALUE
unifi_username: VALUE
nomad/oidc:
secret: VALUE
secrets/ldap:
admin_email: VALUE
admin_password: VALUE
admin_user: VALUE
secrets/mysql:
mysql_root_password: VALUE
secrets/postgres:
@ -166,3 +157,4 @@ secrets/smtp:
server: VALUE
tls: VALUE
user: VALUE

View File

@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

View File

@ -18,14 +18,7 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
}
%{ endif ~}
%{ if batch_node == null ~}
constraint {
attribute = "$${node.unique.name}"
operator = "set_contains_any"
# Only deploy to nodes running tasks to backup
value = "n1,n2"
}
%{ else ~}
%{ if batch_node != null ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
@ -38,7 +31,9 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
mode = "bridge"
port "metrics" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 8080
}
}
@ -49,6 +44,11 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
source = "all-volumes"
}
ephemeral_disk {
# Try to keep restic cache intact
sticky = true
}
service {
name = "backup"
provider = "nomad"
@ -62,6 +62,8 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
task "backup" {
driver = "docker"
shutdown_delay = "5m"
volume_mount {
volume = "all-volumes"
destination = "/data"
@ -69,7 +71,7 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
}
config {
image = "iamthefij/resticscheduler:0.1.1"
image = "iamthefij/resticscheduler:0.4.0"
ports = ["metrics"]
args = [
%{ if batch_node != null ~}
@ -85,81 +87,97 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
]
}
action "unlockenv" {
command = "sh"
args = ["-c", "/bin/resticscheduler -once -unlock all $${NOMAD_TASK_DIR}/node-jobs.hcl"]
}
action "unlocktmpl" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "{{ env 'NOMAD_TASK_DIR' }}/node-jobs.hcl"]
}
action "unlockhc" {
command = "/bin/resticscheduler"
args = ["-once", "-unlock", "all", "/local/node-jobs.hcl"]
}
env = {
"RCLONE_CHECKERS" = "2"
"RCLONE_TRANSFERS" = "2"
"RCLONE_FTP_CONCURRENCY" = "5"
RCLONE_CHECKERS = "2"
RCLONE_TRANSFERS = "2"
RCLONE_FTP_CONCURRENCY = "5"
RESTIC_CACHE_DIR = "$${NOMAD_ALLOC_DIR}/data"
TZ = "America/Los_Angeles"
}
template {
data = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
# TODO: Move this to new mysql root pass path
{{ with nomadVar "nomad/jobs" }}
{{ with nomadVar "secrets/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) -}}
{{ with nomadVar "secrets/postgres" }}
POSTGRES_HOST=127.0.0.1
POSTGRES_PORT=5432
POSTGRES_USER={{ .superuser }}
POSTGRES_PASSWORD={{ .superuser_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (index (env "NOMAD_JOB_ID" | split "/") 0)) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
AWS_ACCESS_KEY_ID={{ .nas_minio_access_key_id }}
AWS_SECRET_ACCESS_KEY={{ .nas_minio_secret_access_key }}
{{ end -}}
EOF
destination = "secrets/db.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
{{ range nomadService "grafana" -}}
# grafana .Node {{ .Node }}
%{ for job_file in fileset(module_path, "jobs/*.hcl") ~}
{{ range nomadService 1 "backups" "${trimsuffix(basename(job_file), ".hcl")}" -}}
# ${trimsuffix(basename(job_file), ".hcl")} .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/grafana.hcl")}
{{- end }}
{{- end }}
${file("${module_path}/${job_file}")}
{{ range nomadService "photoprism" -}}
# photoprism .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/photoprism.hcl")}
{{- end }}
{{- end }}
{{ end -}}
{{ end -}}
%{ endfor ~}
{{ range nomadService "lldap" -}}
# lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{- end }}
{{- end }}
# Dummy job to keep task healthy on node without any stateful services
job "Dummy" {
schedule = "@daily"
{{ range nomadService "sonarr" -}}
# sonarr .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/sonarr.hcl")}
{{- end }}
{{- end }}
config {
repo = "/local/dummy-repo"
passphrase = env("BACKUP_PASSPHRASE")
}
{{ range nomadService "nzbget" -}}
# nzbget .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/nzbget.hcl")}
{{- end }}
{{- end }}
backup {
paths = ["/local/node-jobs.hcl"]
}
forget {
KeepLast = 1
}
}
EOF
destination = "local/node-jobs.hcl"
}
resources {
cpu = 50
memory = 256
memory = 500
}
}
@ -172,8 +190,8 @@ ${file("${module_path}/jobs/nzbget.hcl")}
}
config {
image = "alpine:3.17"
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
@ -181,15 +199,6 @@ ${file("${module_path}/jobs/nzbget.hcl")}
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
@ -199,22 +208,35 @@ delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
[postgres_client]
client = yes
accept = 127.0.0.1:5432
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "postgres-tls" }}
connect = {{ .Address }}:{{ .Port }}
{{ end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/postgres_stunnel_psk.txt
EOF
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
# TODO: Get psk for backup jobs despite multiple job declarations
# Probably should use variable ACLs to grant each node job to this path
template {
data = <<EOF
{{- with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) }}{{ .mysql_stunnel_psk }}{{ end -}}
{{- with nomadVar "secrets/mysql/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "secrets/postgres/allowed_psks/backups" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/postgres_stunnel_psk.txt"
}
}
}
}

backups/backups.tf (new file, 136 lines)
View File

@ -0,0 +1,136 @@
resource "nomad_job" "backup" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
use_wesher = var.use_wesher
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "pi4"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
use_wesher = var.use_wesher
})
}
locals {
# NOTE: This can't be dynamic in first deploy since these values are not known
# all_job_ids = toset(flatten([[for job in resource.nomad_job.backup-oneoff : job.id], [resource.nomad_job.backup.id]]))
all_job_ids = toset(["backup", "backup-oneoff-n1", "backup-oneoff-pi4"])
}
resource "nomad_acl_policy" "secrets_mysql" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_psk" {
path = "secrets/mysql/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.mysql_psk.result}"
}
}
resource "nomad_acl_policy" "mysql_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
resource "nomad_acl_policy" "secrets_postgres" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
}
}
resource "random_password" "postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_psk" {
path = "secrets/postgres/allowed_psks/backups"
items = {
psk = "backups:${resource.random_password.postgres_psk.result}"
}
}
resource "nomad_acl_policy" "postgres_psk" {
for_each = local.all_job_ids
name = "${each.key}-secrets-postgres-psk"
description = "Give access to Postgres PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres/allowed_psks/backups" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = each.key
group = "backup"
task = "stunnel"
}
}
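The use_wesher value passed to templatefile above is the toggle added in commit d5078b24da (TF_VAR_use_wesher). Its declaration is not included in this diff; a minimal sketch, assuming it defaults to enabled, would be:

# Sketch only: variable declaration assumed by the templatefile calls above.
variable "use_wesher" {
  description = "Route service traffic over the Wesher wireguard mesh (disable with TF_VAR_use_wesher=false)"
  type        = bool
  default     = true
}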

backups/jobs/authelia.hcl (new file, 54 lines)
View File

@ -0,0 +1,54 @@
job "authelia" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/authelia"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local authelia dir" {
pre_script {
on_backup = "mkdir -p /local/authelia"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "authelia"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/authelia/dump.sql"
}
}
backup {
paths = ["/local/authelia"]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

57
backups/jobs/git.hcl Normal file
View File

@ -0,0 +1,57 @@
job "git" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/gitea"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local gitea dir" {
pre_script {
on_backup = "mkdir -p /local/gitea"
}
}
task "Backup database" {
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = "gitea"
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/gitea/dump.sql"
}
}
backup {
paths = [
"/local/gitea",
"/data/nas-container/gitea",
]
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -1,9 +1,13 @@
job "grafana" {
schedule = "0 * * * *"
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/grafana"
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/grafana"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local grafana dir" {
@ -26,8 +30,14 @@ job "grafana" {
backup {
paths = ["/local/grafana"]
# Because path is absolute
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}

64
backups/jobs/lidarr.hcl Normal file
View File

@ -0,0 +1,64 @@
job "lidarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lidarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "lidarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/lidarr/Backups/dump-lidarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/lidarr"]
backup_opts {
Exclude = [
"lidarr_backup_*.zip",
"/data/nas-container/lidarr/MediaCover",
"/data/nas-container/lidarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -2,8 +2,12 @@ job "lldap" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/lldap"
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local backup dir" {
@ -26,8 +30,14 @@ job "lldap" {
backup {
paths = ["/local/lldap"]
# Because path is absolute
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}

View File

@ -2,8 +2,12 @@ job "nzbget" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/nzbget"
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
@ -13,8 +17,14 @@ job "nzbget" {
# Queued nzb files
"/data/media-write/Downloads/nzb",
]
# Because path is absolute
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}

View File

@ -2,8 +2,12 @@ job "photoprism" {
schedule = "10 * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/photoprism"
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/photoprism"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Create local photoprism dir" {
@ -29,8 +33,17 @@ job "photoprism" {
"/local/photoprism",
"/data/nas-container/photoprism",
]
# Because path is absolute
backup_opts {
Host = "nomad"
Exclude = [
"/data/nas-container/photoprism/cache",
]
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}

64
backups/jobs/radarr.hcl Normal file
View File

@ -0,0 +1,64 @@
job "radarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/radarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "radarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/radarr/Backups/dump-radarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/radarr"]
backup_opts {
Exclude = [
"radarr_backup_*.zip",
"/data/nas-container/radarr/MediaCover",
"/data/nas-container/radarr/logs",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

View File

@ -2,14 +2,24 @@ job "sabnzbd" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/sabnzbd"
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sabnzbd"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
backup {
paths = ["/data/media-write/Downloads/sabnzbd"]
# Because path is absolute
backup_opts {
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}

67
backups/jobs/sonarr.hcl Normal file
View File

@ -0,0 +1,67 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "s3://backups-minio.agnosticfront.thefij:8443/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
options {
InsecureTls = true
}
}
task "Backup main database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr.sql"
}
}
task "Backup logs database" {
postgres "Backup database" {
hostname = env("POSTGRES_HOST")
port = env("POSTGRES_PORT")
username = env("POSTGRES_USER")
password = env("POSTGRES_PASSWORD")
database = "sonarr-logs"
no_tablespaces = true
dump_to = "/data/nas-container/sonarr/Backups/dump-sonarr-logs.sql"
}
}
backup {
paths = ["/data/nas-container/sonarr"]
backup_opts {
Exclude = [
"sonarr_backup_*.zip",
"/data/nas-container/sonarr/MediaCover",
"/data/nas-container/sonarr/logs",
"*.db",
"*.db-shm",
"*.db-wal",
]
Host = "nomad"
}
restore_opts {
Host = ["nomad"]
# Because path is absolute
Target = "/"
}
}
forget {
KeepLast = 2
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}

5
backups/vars.tf Normal file
View File

@ -0,0 +1,5 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

12
core.tf
View File

@ -1,12 +0,0 @@
module "databases" {
source = "./databases"
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}

View File

@ -2,20 +2,39 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.20"
version = "2.1.1"
hashes = [
"h1:M/QVXHPfeySejJZI3I8mBYrL/J9VsbnyF/dKIMlUhXo=",
"zh:02989edcebe724fc0aa873b22176fd20074c4f46295e728010711a8fc5dfa72c",
"zh:089ba7d19bcf5c6bab3f8b8c5920eb6d78c52cf79bb0c5dfeb411c600e7efcba",
"zh:235865a2182ca372bcbf440201a8b8cc0715ad5dbc4de893d99b6f32b5be53ab",
"zh:67ea718764f3f344ecc6e027d20c1327b86353c8064aa90da3ec12cec4a88954",
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8c68c540f0df4980568bdd688c2adec86eda62eb2de154e3db215b16de0a7ae0",
"zh:911969c63a69a733be57b96d54c5966c9424e1abec8d5f20038c8cef3a504c65",
"zh:a673c92ddc9d47e8d53dcb9b376f1adcb4543488202fc83a3e7eab8677530684",
"zh:a94a73eae89fd8c8ebf872013079be41161d3f293f4026c92d45c4c5667dd613",
"zh:db6b89f8b696040c0344f00928e4cf6e0a75034421ba14cdcd8a4d23bc865dce",
"zh:e512c0b1239e3d66b60d22c2b4de19fea288e492cde90dff9277cc475fd9dbbf",
"zh:ef6eccecbdef3bb8ce629cabfb5550c1db5c3e952943dda1786ef6cb470a8c23",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

204
core/authelia.tf Normal file
View File

@ -0,0 +1,204 @@
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:4.37"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9999
service_port_static = true
use_wesher = var.use_wesher
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
use_smtp = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?rd=https%3A%2F%2Fauthelia.${var.base_hostname}%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:$${NOMAD_PORT_main}/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_policy" "authelia" {
name = "authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "authelia/*" {
capabilities = ["read"]
}
path "secrets/authelia/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Give access to ldap secrets
resource "nomad_acl_policy" "authelia_ldap_secrets" {
name = "authelia-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = module.authelia.job_id
}
}
# Enable oidc for nomad clients
module "nomad_oidc_client" {
source = "./oidc_client"
name = "nomad"
oidc_client_config = {
description = "Nomad"
authorization_policy = "two_factor"
redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
scopes = ["openid", "groups"]
}
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.${var.base_hostname}"
oidc_client_id = module.nomad_oidc_client.client_id
oidc_client_secret = module.nomad_oidc_client.secret
bound_audiences = ["nomad"]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.${var.base_hostname}/oidc/callback",
"https://nomad.${var.base_hostname}/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}
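The two binding rules above attach Authelia users carrying the nomad-deploy role to Nomad ACL roles that are defined outside this diff (see the acls.nomad_acl_role comments). For reference, such a role is roughly shaped like the sketch below (hypothetical; the policy name is an assumption). Once applied, an operator signs in with something like nomad login -method=authelia.

resource "nomad_acl_role" "deploy" {
  name        = "deploy"
  description = "Role granted to nomad-deploy users logging in via Authelia OIDC"

  # References an existing nomad_acl_policy defined elsewhere
  policy {
    name = "deploy"
  }
}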

View File

@ -13,7 +13,7 @@ default_2fa_method: ""
server:
host: 0.0.0.0
port: 9091
port: {{ env "NOMAD_PORT_main" }}
disable_healthcheck: false
log:
@ -89,8 +89,8 @@ authentication_backend:
groups_filter: (member={dn})
## The username and password of the admin user.
{{ with nomadVar "nomad/jobs/authelia" }}
user: uid={{ .lldap_admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ with nomadVar "secrets/ldap" }}
user: uid={{ .admin_user }},ou=people,{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}
{{ end }}
# password set using secrets file
# password: <secret>
@ -151,6 +151,22 @@ access_control:
networks: 192.168.5.0/24
rules:
## Allow favicons on internal network
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
resources:
- '^/apple-touch-icon-precomposed\.png$'
- '^/assets/safari-pinned-tab\.svg$'
- '^/apple-touch-icon-180x180\.png$'
- '^/apple-touch-icon\.png$'
- '^/favicon\.ico$'
networks:
- internal
policy: bypass
{{ range nomadVarList "authelia/access_control/service_rules" }}{{ with nomadVar .Path }}
- domain: '{{ .name }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
{{ .rule.Value | indent 6 }}
{{ end }}{{ end }}
## Rules applied to everyone
- domain: '*.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}'
networks:
@ -219,13 +235,13 @@ storage:
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: false
disable_startup_check: true
{{ with nomadVar "nomad/jobs" }}
{{ with nomadVar "secrets/smtp" }}
smtp:
host: {{ .smtp_server }}
port: {{ .smtp_port }}
username: {{ .smtp_user }}
host: {{ .server }}
port: {{ .port }}
username: {{ .user }}
# password: <in file>
{{- end }}
@ -245,4 +261,18 @@ identity_providers:
# hmac_secret: <file>
# issuer_private_key: <file>
clients: {{ with nomadVar "nomad/jobs/authelia" }}{{ .oidc_clients.Value }}{{ end }}
clients:
{{ range nomadVarList "authelia/access_control/oidc_clients" -}}
{{- $name := (sprig_last (sprig_splitList "/" .Path)) -}}
{{ "-" | indent 6 }}
{{ with nomadVar .Path }}
{{- $im := .ItemsMap -}}
{{- $im = sprig_set $im "redirect_uris" (.redirect_uris.Value | parseYAML) -}}
{{- $im = sprig_set $im "scopes" (.scopes.Value | parseYAML) -}}
{{- with nomadVar (printf "secrets/authelia/%s" $name) -}}
{{- $im = sprig_set $im "secret" .secret_hash.Value -}}
{{- end -}}
{{ $im | toYAML | indent 8 }}
{{ end }}
{{ end }}
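The loop above renders one OIDC client per Nomad variable under authelia/access_control/oidc_clients/<name>, pulling the hashed client secret from secrets/authelia/<name>. Based on the oidc_client module usages in this change, one of those variables likely carries items along these lines (illustrative sketch only; the id key and exact encoding are assumptions):

resource "nomad_variable" "example_oidc_client" {
  path = "authelia/access_control/oidc_clients/grafana"

  items = {
    id                   = "grafana"
    description          = "Grafana"
    authorization_policy = "two_factor"
    redirect_uris        = yamlencode(["https://grafana.thefij.rocks/login/generic_oauth"])
    scopes               = yamlencode(["openid", "groups", "email", "profile"])
  }
}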

View File

@ -2,20 +2,39 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
version = "2.0.0"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}

View File

@ -1,20 +1,23 @@
variable "config_data" {
type = string
description = "Plain text config file for blocky"
}
job "blocky" {
datacenters = ["dc1"]
type = "system"
type = "service"
priority = 100
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
# TODO: maybe switch to service job from system so we can use canary and autorollback
# auto_revert = true
auto_revert = true
min_healthy_time = "60s"
healthy_deadline = "5m"
}
group "blocky" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/12023
count = 2
network {
mode = "bridge"
@ -24,13 +27,17 @@ job "blocky" {
}
port "api" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = "4000"
}
dns {
# Set explicit DNS servers because, by default, tasks would use this task for DNS
servers = ["1.1.1.1", "1.0.0.1"]
servers = [
"192.168.2.1",
]
}
}
@ -58,6 +65,11 @@ job "blocky" {
path = "/"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
@ -65,20 +77,32 @@ job "blocky" {
driver = "docker"
config {
image = "ghcr.io/0xerr0r/blocky"
args = ["-c", "${NOMAD_TASK_DIR}/config.yml"]
image = "ghcr.io/0xerr0r/blocky:v0.24"
args = ["-c", "$${NOMAD_TASK_DIR}/config.yml"]
ports = ["dns", "api"]
}
action "refresh-lists" {
command = "/app/blocky"
args = ["lists", "refresh"]
}
action "healthcheck" {
command = "/app/blocky"
args = ["healthcheck"]
}
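# These actions can be triggered on a running allocation from the CLI,
# e.g. (Nomad 1.7+ syntax, illustrative):
#   nomad action -job=blocky -group=blocky -task=blocky refresh-lists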
resources {
cpu = 50
memory = 50
memory_max = 100
memory = 75
memory_max = 150
}
template {
data = var.config_data
destination = "${NOMAD_TASK_DIR}/config.yml"
data = <<EOF
${file("${module_path}/config.yml")}
EOF
destination = "$${NOMAD_TASK_DIR}/config.yml"
splay = "1m"
wait {
@ -95,7 +119,7 @@ job "blocky" {
{{- end }}
{{- end }}
EOF
destination = "${NOMAD_TASK_DIR}/nomad.hosts"
destination = "$${NOMAD_TASK_DIR}/nomad.hosts"
change_mode = "noop"
wait {
@ -103,6 +127,121 @@ job "blocky" {
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .block_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/block"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/user" }}
{{ with nomadVar "blocky_lists/user" -}}
{{ .allow_list.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/allow"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "30s"
max = "1m"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .smarttv_regex.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/smarttv-regex.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .wemo.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/wemo.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{ if nomadVarExists "blocky_lists/terraform" }}
{{ with nomadVar "blocky_lists/terraform" -}}
{{ .sonos.Value }}
{{- end }}
{{- end }}
EOF
destination = "$${NOMAD_TASK_DIR}/sonos.txt"
change_mode = "script"
change_script {
command = "/app/blocky"
args = ["lists", "refresh"]
timeout = "20s"
}
wait {
min = "10s"
max = "20s"
}
}
}
task "stunnel" {
@ -114,9 +253,9 @@ job "blocky" {
}
config {
image = "alpine:3.17"
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
}
resources {
@ -124,43 +263,41 @@ job "blocky" {
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[dns_server]
# Dummy server to keep stunnel running if no mysql is present
accept = 8053
connect = 127.0.0.1:53
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
{{- end }}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
[redis_client]
client = yes
accept = 127.0.0.1:6379
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
{{- end }}
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
{{- with nomadVar "secrets/mysql/allowed_psks/blocky" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
@ -169,11 +306,11 @@ EOF
data = <<EOF
{{- with nomadVar "nomad/jobs/blocky/blocky/stunnel" -}}{{ .redis_stunnel_psk }}{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
task "blocky-bootstrap" {
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
@ -184,23 +321,19 @@ EOF
config {
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
"/usr/bin/timeout 2m /bin/bash -c \"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done\" || true",
]
}
template {
data = <<EOF
[client]
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
host={{ .Address }}
port={{ .Port }}
{{ end -}}
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "nomad/jobs" }}
{{ with nomadVar "secrets/mysql" }}
password={{ .mysql_root_password }}
{{ end }}
EOF

View File

@ -1,25 +1,88 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
locals {
config_data = templatefile(
"${path.module}/config.yml",
{
"base_hostname" = var.base_hostname,
}
)
}
resource "nomad_job" "blocky" {
hcl2 {
enabled = true
vars = {
"config_data" = local.config_data,
jobspec = templatefile("${path.module}/blocky.nomad", {
use_wesher = var.use_wesher,
module_path = path.module,
})
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "blocky_mysql_bootstrap_secrets" {
name = "blocky-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
jobspec = file("${path.module}/blocky.nomad")
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "mysql-bootstrap"
}
}
resource "random_password" "blocky_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "blocky_mysql_psk" {
path = "secrets/mysql/allowed_psks/blocky"
items = {
psk = "blocky:${resource.random_password.blocky_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "blocky_mysql_psk" {
name = "blocky-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/blocky" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "stunnel"
}
}
resource "nomad_variable" "blocky_lists_terraform" {
path = "blocky_lists/terraform"
items = {
smarttv_regex = file("${path.module}/list-smarttv-regex.txt")
wemo = file("${path.module}/list-wemo.txt")
sonos = file("${path.module}/list-sonos.txt")
}
}
resource "nomad_acl_policy" "blocky_lists" {
name = "blocky-lists"
description = "Give access Blocky lists"
rules_hcl = <<EOH
namespace "default" {
variables {
path "blocky_lists/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "blocky"
group = "blocky"
task = "blocky"
}
}
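Only the blocky_lists/terraform variable is managed here; the blocky_lists/user variable read by the block and allow templates is left for manual management (for example via something like nomad var put blocky_lists/user block_list=... allow_list=...). Its expected shape, if it were ever codified, would be along these lines (sketch, values illustrative):

resource "nomad_variable" "blocky_lists_user" {
  path = "blocky_lists/user"

  items = {
    block_list = "ads.example.com"
    allow_list = "allowed.example.com"
  }
}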

View File

@ -2,28 +2,53 @@ ports:
dns: 53
http: 4000
# I must have ip v6 blocked or something
connectIPVersion: v4
bootstrapDns:
- upstream: 1.1.1.1
- upstream: 1.0.0.1
- upstream: 9.9.9.9
- upstream: 149.112.112.112
upstream:
default:
- 1.1.1.1
- 1.0.0.1
quad9:
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
upstreams:
init:
strategy: fast
groups:
default:
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
- https://one.one.one.one/dns-query
- tcp-tls:one.one.one.one
# cloudflare:
# - 1.1.1.1
# - 1.0.0.1
# - 2606:4700:4700::1111
# - 2606:4700:4700::1001
# - https://one.one.one.one/dns-query
# - tcp-tls:one.one.one.one
# quad9:
# - 9.9.9.9
# - 149.112.112.112
# - 2620:fe::fe
# - 2620:fe::9
# - https://dns.quad9.net/dns-query
# - tcp-tls:dns.quad9.net
# quad9-secured:
# - 9.9.9.11
# - 149.112.112.11
# - 2620:fe::11
# - 2620:fe::fe:11
# - https://dns11.quad9.net/dns-query
# - tcp-tls:dns11.quad9.net
# quad9-unsecured:
# - 9.9.9.10
# - 149.112.112.10
# - 2620:fe::10
# - 2620:fe::fe:10
# - https://dns10.quad9.net/dns-query
# - tcp-tls:dns10.quad9.net
conditional:
fallbackUpstream: false
@ -36,9 +61,11 @@ conditional:
.: 192.168.2.1
hostsFile:
filePath: {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
sources:
- {{ env "NOMAD_TASK_DIR" }}/nomad.hosts
hostsTTL: 30s
refreshPeriod: 30s
loading:
refreshPeriod: 30s
clientLookup:
upstream: 192.168.2.1
@ -50,22 +77,12 @@ blocking:
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://hosts-file.net/ad_servers.txt
smarttv:
- https://perflyst.github.io/PiHoleBlocklist/SmartTV.txt
- https://perflyst.github.io/PiHoleBlocklist/regex.list
wemo:
- |
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com
malware:
- https://mirror1.malwaredomains.com/files/justdomains
# - https://hosts-file.net/ad_servers.txt
iot:
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
- {{ env "NOMAD_TASK_DIR" }}/smarttv-regex.txt
- {{ env "NOMAD_TASK_DIR" }}/wemo.txt
- {{ env "NOMAD_TASK_DIR" }}/sonos.txt
antisocial:
- |
facebook.com
@ -73,20 +90,21 @@ blocking:
reddit.com
twitter.com
youtube.com
custom:
- {{ env "NOMAD_TASK_DIR" }}/block
whiteLists:
# Move to Gitea when deployed internally
ads:
{{ with nomadVar "nomad/jobs/blocky" -}}
{{ .whitelists_ads.Value | indent 6 }}
{{- end }}
custom:
- {{ env "NOMAD_TASK_DIR" }}/allow
clientGroupsBlock:
default:
- ads
- malware
- smarttv
- wemo
- custom
192.168.3.1/24:
- ads
- iot
- custom
customDNS:
customTTL: 1h
@ -105,7 +123,7 @@ customDNS:
prometheus:
enable: true
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-tls" -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-blocky" -}}
redis:
address: 127.0.0.1:6379
# password: ""
@ -114,7 +132,6 @@ redis:
connectionCooldown: 3s
{{ end -}}
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
{{ with nomadVar "nomad/jobs/blocky" -}}
queryLog:

View File

@ -0,0 +1,13 @@
# From: https://perflyst.github.io/PiHoleBlocklist/regex.list
# Title: Perflyst's SmartTV Blocklist for Pi-hole - RegEx extension
# Version: 13July2023v1
# Samsung
/(^|\.)giraffic\.com$/
/(^|\.)internetat\.tv$/
/(^|\.)pavv\.co\.kr$/
/(^|\.)samsungcloudsolution\.net$/
/(^|\.)samsungelectronics\.com$/
/(^|\.)samsungrm\.net$/
# /(^|\.)samsungotn\.net$/ # prevents updates
# /(^|\.)samsungcloudcdn\.com$/ # prevents updates
# /(^|\.)samsungcloudsolution\.com$/ # prevents internet connection

View File

@ -0,0 +1,2 @@
# Block Sonos devices from phoning home and allowing remote access
/(^|\.)sonos\.com$/

View File

@ -0,0 +1,8 @@
# Remote commands
api.xbcs.net
# Firmware updates
fw.xbcs.net
# TURN service
nat.wemo2.com
# Connectivity checks
heartbeat.xwemo.com

5
core/blocky/vars.tf Normal file
View File

@ -0,0 +1,5 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

View File

@ -1,14 +1,24 @@
job "exporters" {
datacenters = ["dc1"]
type = "system"
type = "service"
priority = 55
constraint {
distinct_hosts = true
}
group "promtail" {
# TODO: This must be updated to match the number of servers (possibly grabbed from TF)
# I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/1202
count = 2
network {
mode = "bridge"
port "promtail" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9080
}
}
@ -19,8 +29,8 @@ job "exporters" {
port = "promtail"
meta {
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
nomad_dc = "$${NOMAD_DC}"
nomad_node_name = "$${node.unique.name}"
}
tags = [
@ -38,8 +48,8 @@ job "exporters" {
}
config {
image = "grafana/promtail:2.7.1"
args = ["-config.file=${NOMAD_TASK_DIR}/promtail.yml"]
image = "grafana/promtail:2.9.1"
args = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"]
ports = ["promtail"]
# Bind mount host machine-id and log directories
@ -127,7 +137,7 @@ scrape_configs:
- source_labels: ['__journal_com_hashicorp_nomad_task_name']
target_label: nomad_task_name
EOF
destination = "${NOMAD_TASK_DIR}/promtail.yml"
destination = "$${NOMAD_TASK_DIR}/promtail.yml"
}
resources {

View File

@ -8,7 +8,9 @@ job "grafana" {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 3000
}
}
@ -26,7 +28,6 @@ job "grafana" {
tags = [
"traefik.enable=true",
"traefik.http.routers.grafana.entryPoints=websecure",
# "traefik.http.routers.grafana.middlewares=authelia@nomad",
]
}
@ -39,8 +40,8 @@ job "grafana" {
}
config {
image = "alpine:3.17"
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
}
resources {
@ -48,15 +49,6 @@ job "grafana" {
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
@ -74,17 +66,15 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
# TODO: Get psk for backup jobs despite multiple job declarations
# Probably should use variable ACLs to grant each node job to this path
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/grafana/grafana/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
{{- with nomadVar "secrets/mysql/allowed_psks/grafana" }}{{ .psk }}{{ end -}}
EOF
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}
task "grafana-bootstrap" {
task "mysql-bootstrap" {
driver = "docker"
lifecycle {
@ -96,10 +86,10 @@ EOF
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do echo 'Retry in 10s'; sleep 10; done",
]
}
@ -109,7 +99,7 @@ EOF
host=127.0.0.1
port=3306
user=root
{{ with nomadVar "nomad/jobs" -}}
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
@ -143,26 +133,28 @@ SELECT 'NOOP';
driver = "docker"
config {
image = "grafana/grafana:9.4.2"
image = "grafana/grafana:10.0.10"
args = ["--config", "$${NOMAD_ALLOC_DIR}/config/grafana.ini"]
ports = ["web"]
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel",
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini"
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning"
"GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini",
"GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning",
}
template {
data = <<EOF
{{ with nomadVar "secrets/smtp" -}}
GF_SMTP_USER={{ .user }}
GF_SMTP_PASSWORD={{ .password }}
{{ end -}}
{{ with nomadVar "nomad/jobs/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }}
GF_SMTP_USER={{ .smtp_user }}
GF_SMTP_PASSWORD={{ .smtp_password }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }}
{{ if .db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
@ -170,10 +162,14 @@ GF_DATABASE_HOST=127.0.0.1:3306
GF_DATABASE_NAME={{ .db_name }}
GF_DATABASE_USER={{ .db_user }}
GF_DATABASE_PASSWORD={{ .db_pass }}
{{- end }}
{{ end -}}
SLACK_BOT_URL={{ .slack_bot_url }}
SLACK_BOT_TOKEN={{ .slack_bot_token }}
SLACK_HOOK_URL={{ .slack_hook_url }}
{{ end -}}
{{ with nomadVar "secrets/authelia/grafana" -}}
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ .client_id }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .secret }}
{{ end -}}
EOF
env = true
@ -195,7 +191,7 @@ SLACK_HOOK_URL={{ .slack_hook_url }}
}
config {
image = "alpine"
image = "alpine:3.17"
args = ["$${NOMAD_TASK_DIR}/startup.sh"]
}
@ -263,7 +259,7 @@ ${file(join("/", [module_path, "grafana", config_file]))}
# Set owner to grafana uid
# uid = 472
# Change template delimiters for dashboard files that use JSON and contain double curly braces and square brackets
%{ if length(regexall("dashboard", config_file)) > 0 ~}
%{ if endswith(config_file, ".json") ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif }
@ -279,6 +275,11 @@ ${file(join("/", [module_path, "grafana", config_file]))}
task "grafana-image-renderer" {
driver = "docker"
constraint {
attribute = "$${attr.cpu.arch}"
value = "amd64"
}
config {
image = "grafana/grafana-image-renderer:3.6.1"
ports = ["renderer"]

View File

@ -20,8 +20,8 @@ data = /var/lib/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
; provisioning = /etc/grafana/provisioning
# folder that contains PROVISIONING config files that grafana will apply on startup and while running.
provisioning = from_env
#################################### Server ####################################
[server]
@ -43,7 +43,7 @@ data = /var/lib/grafana
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
root_url = https://grafana.thefij.rocks
root_url = https://grafana.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
# Log web requests
;router_logging = false
@ -261,15 +261,19 @@ log_queries =
enabled = true
name = Authelia
;allow_sign_up = true
client_id = grafana
client_id = from_env
client_secret = from_env
scopes = openid profile email groups
auth_url = https://authelia.thefij.rocks/api/oidc/authorization
token_url = https://authelia.thefij.rocks/api/oidc/token
api_url = https://authelia.thefij.rocks/api/oidc/userinfo
auth_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/authorization
token_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/token
api_url = https://authelia.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}/api/oidc/userinfo
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
# Role attribute path is not working
role_attribute_path = contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || contains(groups[*], 'developer') && 'Editor'
allow_assign_grafana_admin = true
skip_org_role_sync = true
use_pkce = true
;team_ids =
@ -437,7 +441,7 @@ enabled = true
provider = s3
[external_image_storage.s3]
endpoint = https://minio.thefij.rocks
endpoint = https://minio.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}
bucket = grafana-images
region = us-east-1
path_style_access = true

View File

@ -104,7 +104,7 @@
"uid": "Prometheus"
},
"exemplar": false,
"expr": "sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"instant": true,
"interval": "",
@ -458,7 +458,7 @@
"uid": "Prometheus"
},
"exemplar": true,
"expr": "sum(blocky_blacklist_cache) / sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(blocky_blacklist_cache) / sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"instant": false,
"interval": "",
@ -533,7 +533,7 @@
"uid": "Prometheus"
},
"exemplar": true,
"expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(go_memstats_sys_bytes{job=\"exporters\", consul_service=\"blocky-api\"})/sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"instant": false,
"interval": "",
@ -753,7 +753,7 @@
"uid": "Prometheus"
},
"exemplar": true,
"expr": "sum(blocky_cache_entry_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(blocky_cache_entry_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"instant": false,
"interval": "",
@ -1162,7 +1162,7 @@
"uid": "Prometheus"
},
"exemplar": false,
"expr": "sum(time() -blocky_last_list_group_refresh)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(time() -blocky_last_list_group_refresh)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"instant": true,
"interval": "",
@ -1224,7 +1224,7 @@
"uid": "Prometheus"
},
"exemplar": true,
"expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(up{job=\"exporters\", consul_service=\"blocky-api\"})",
"expr": "sum(blocky_prefetch_domain_name_cache_count)/ sum(nomad_client_allocs_running{exported_job=\"blocky\", task=\"blocky\"})",
"format": "table",
"interval": "",
"legendFormat": "",

View File

@ -5,4 +5,4 @@ providers:
type: file
disableDeletion: false
options:
path: /etc/grafana/provisioning/dashboards/default
path: {{ env "NOMAD_ALLOC_DIR" }}/config/provisioning/dashboards/default

View File

@ -0,0 +1,19 @@
---
apiVersion: 1
datasources:
- name: HASS Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: hass
jsonData:
dbName: hass
- name: Proxmox Metrics
url: "http://192.168.2.75:8086"
type: influxdb
access: proxy
database: proxmox
jsonData:
dbName: proxmox

96
core/lego.nomad Normal file
View File

@ -0,0 +1,96 @@
variable "lego_version" {
default = "4.14.2"
type = string
}
variable "nomad_var_dirsync_version" {
default = "0.0.2"
type = string
}
job "lego" {
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "main" {
network {
dns {
servers = ["1.1.1.1", "1.0.0.1"]
}
}
task "main" {
driver = "exec"
config {
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/start.sh"]
}
artifact {
source = "https://github.com/go-acme/lego/releases/download/v${var.lego_version}/lego_v${var.lego_version}_linux_${attr.cpu.arch}.tar.gz"
}
artifact {
source = "https://git.iamthefij.com/iamthefij/nomad-var-dirsync/releases/download/v${var.nomad_var_dirsync_version}/nomad-var-dirsync-linux-${attr.cpu.arch}.tar.gz"
}
template {
data = <<EOH
#! /bin/sh
set -ex
cd ${NOMAD_TASK_DIR}
echo "Read certs from nomad vars"
${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs read .
action=run
if [ -f /.lego/certificates/_.thefij.rocks.crt ]; then
action=renew
fi
echo "Attempt to $action certificates"
${NOMAD_TASK_DIR}/lego \
--accept-tos --pem \
--email=iamthefij@gmail.com \
--domains="*.thefij.rocks" \
--dns="cloudflare" \
$action \
--$action-hook="${NOMAD_TASK_DIR}/nomad-var-dirsync-linux-{{ env "attr.cpu.arch" }} -root-var=secrets/certs write .lego" \
EOH
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/lego" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
env = {
NOMAD_ADDR = "unix:///secrets/api.sock"
}
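# The workload identity below is injected into the environment as NOMAD_TOKEN,
# letting nomad-var-dirsync authenticate against the task API socket configured above.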
identity {
env = true
}
resources {
cpu = 50
memory = 100
}
}
}
}

23
core/lego.tf Normal file
View File

@ -0,0 +1,23 @@
resource "nomad_job" "lego" {
jobspec = file("${path.module}/lego.nomad")
}
resource "nomad_acl_policy" "secrets_certs_write" {
name = "secrets-certs-write"
description = "Write certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["write", "read"]
}
path "secrets/certs" {
capabilities = ["write", "read"]
}
}
}
EOH
job_acl {
job_id = "lego/*"
}
}
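The policy above only lets the lego job write certificates into secrets/certs. Jobs that consume those certificates would need a read-scoped counterpart along these lines (a sketch; the consumer job_id is an assumption):

resource "nomad_acl_policy" "secrets_certs_read" {
  name        = "secrets-certs-read"
  description = "Read certs from the secrets store"
  rules_hcl   = <<EOH
namespace "default" {
  variables {
    path "secrets/certs/*" {
      capabilities = ["read"]
    }
  }
}
EOH

  job_acl {
    job_id = "traefik"
  }
}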

View File

@ -3,31 +3,27 @@ auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
max_transfer_retries: 0
common:
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
replication_factor: 1
path_prefix: /tmp/loki
schema_config:
configs:
- from: 2018-04-15
store: boltdb
- from: 2020-05-15
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 168h
period: 24h
storage_config:
boltdb:
directory: {{ env "NOMAD_TASK_DIR" }}/index
boltdb_shipper:
active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index
filesystem:
directory: {{ env "NOMAD_TASK_DIR" }}/chunks
@ -38,8 +34,8 @@ limits_config:
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
max_look_back_period: 168h
table_manager:
retention_deletes_enabled: false
retention_period: 0s
retention_deletes_enabled: true
retention_period: 168h

24
core/loki.tf Normal file
View File

@ -0,0 +1,24 @@
module "loki" {
source = "../services/service"
detach = false
name = "loki"
image = "grafana/loki:2.8.7"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
use_wesher = var.use_wesher
service_check = {
path = "/ready"
}
sticky_disk = true
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}

View File

@ -1,21 +1,14 @@
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
base_hostname = var.base_hostname
}
module "metrics" {
source = "./metrics"
# Not in this module
# depends_on = [module.databases]
}
resource "nomad_job" "nomad-client-stalker" {
@ -23,180 +16,12 @@ resource "nomad_job" "nomad-client-stalker" {
jobspec = file("${path.module}/nomad-client-stalker.nomad")
}
module "loki" {
source = "../services/service"
name = "loki"
image = "grafana/loki:2.2.1"
args = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
service_port = 3100
ingress = true
sticky_disk = true
# healthcheck = "/ready"
templates = [
{
data = file("${path.module}/loki-config.yml")
dest = "loki-config.yml"
mount = false
}
]
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
depends_on = [module.loki]
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}
module "authelia" {
source = "../services/service"
name = "authelia"
instance_count = 2
priority = 70
image = "authelia/authelia:latest"
args = ["--config", "$${NOMAD_TASK_DIR}/authelia.yml"]
ingress = true
service_port = 9091
service_port_static = true
# metrics_port = 9959
env = {
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/ldap_password.txt"
AUTHELIA_JWT_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
AUTHELIA_SESSION_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/session_secret.txt"
AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = "$${NOMAD_SECRETS_DIR}/storage_encryption_key.txt"
AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/mysql_password.txt"
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = "$${NOMAD_SECRETS_DIR}/oidc_hmac_secret.txt"
AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_private_key.txt"
# AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE = "$${NOMAD_SECRETS_DIR}/oidc_issuer_certificate_chain.txt"
}
use_mysql = true
use_ldap = true
use_redis = true
mysql_bootstrap = {
enabled = true
}
service_tags = [
# Configure traefik to add this middleware
"traefik.http.middlewares.authelia.forwardAuth.address=http://authelia.nomad:9091/api/verify?rd=https%3A%2F%2Fauthelia.thefij.rocks%2F",
"traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
"traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia.nomad:9091/api/verify?auth=basic",
"traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true",
"traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]
templates = [
{
data = file("${path.module}/authelia.yml")
dest = "authelia.yml"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .lldap_admin_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "ldap_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "jwt_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "session_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .storage_encryption_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "storage_encryption_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .db_pass }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "mysql_password.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_hmac_secret }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_hmac_secret.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_private_key }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_private_key.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .oidc_issuer_certificate_chain }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "oidc_issuer_certificate_chain.txt"
mount = false
},
{
data = "{{ with nomadVar \"nomad/jobs\" }}{{ .smtp_password }}{{ end }}"
dest_prefix = "$${NOMAD_SECRETS_DIR}"
dest = "smtp_password.txt"
mount = false
},
]
}
resource "nomad_acl_auth_method" "nomad_authelia" {
name = "authelia"
type = "OIDC"
token_locality = "global"
max_token_ttl = "1h0m0s"
default = true
config {
oidc_discovery_url = "https://authelia.thefij.rocks"
oidc_client_id = "nomad"
oidc_client_secret = yamldecode(file("${path.module}/../ansible_playbooks/vars/nomad_vars.yml"))["nomad/oidc"]["secret"]
bound_audiences = ["nomad"]
oidc_scopes = [
"groups",
"openid",
]
allowed_redirect_uris = [
"https://nomad.thefij.rocks/oidc/callback",
"https://nomad.thefij.rocks/ui/settings/tokens",
]
list_claim_mappings = {
"groups" : "roles"
}
}
}
resource "nomad_acl_binding_rule" "nomad_authelia_admin" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "admin" # acls.nomad_acl_role.admin.name
}
resource "nomad_acl_binding_rule" "nomad_authelia_deploy" {
description = "engineering rule"
auth_method = nomad_acl_auth_method.nomad_authelia.name
selector = "\"nomad-deploy\" in list.roles"
bind_type = "role"
bind_name = "deploy" # acls.nomad_acl_role.deploy.name
}

119
core/metrics.tf Normal file
View File

@ -0,0 +1,119 @@
resource "nomad_job" "exporters" {
jobspec = templatefile("${path.module}/exporters.nomad", {
use_wesher = var.use_wesher,
})
}
resource "nomad_job" "prometheus" {
jobspec = templatefile("${path.module}/prometheus.nomad", {
use_wesher = var.use_wesher,
})
detach = false
}
resource "nomad_job" "grafana" {
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
use_wesher = var.use_wesher
})
depends_on = [nomad_job.prometheus]
}
resource "nomad_acl_policy" "grafana_smtp_secrets" {
name = "grafana-secrets-smtp"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "grafana_mysql_bootstrap_secrets" {
name = "grafana-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "mysql-bootstrap"
}
}
resource "random_password" "grafana_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "grafana_mysql_psk" {
path = "secrets/mysql/allowed_psks/grafana"
items = {
psk = "grafana:${resource.random_password.grafana_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "grafana_mysql_psk" {
name = "grafana-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/grafana" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "grafana"
group = "grafana"
task = "stunnel"
}
}
module "grafana_oidc" {
source = "./oidc_client"
name = "grafana"
oidc_client_config = {
description = "Grafana"
scopes = [
"openid",
"groups",
"email",
"profile",
]
redirect_uris = [
"https://grafana.thefij.rocks/login/generic_oauth",
]
}
job_acl = {
job_id = "grafana"
group = "grafana"
task = "grafana"
}
}
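
The `grafana_oidc` module writes the generated client id and secret to the Nomad variable `secrets/authelia/grafana`, which the grafana task can template into its OAuth settings. A sketch of such a stanza in the Grafana jobspec; the env file name is an assumption, though the `GF_AUTH_GENERIC_OAUTH_*` variables are standard Grafana settings:

      template {
        data        = <<EOF
GF_AUTH_GENERIC_OAUTH_CLIENT_ID={{ with nomadVar "secrets/authelia/grafana" }}{{ .client_id }}{{ end }}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ with nomadVar "secrets/authelia/grafana" }}{{ .secret }}{{ end }}
EOF
        destination = "secrets/oauth.env"
        env         = true
      }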


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"h1:tAb2gwW+oZ8/t2j7lExdqpNrxmaWsHbyA2crFWClPb0=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,783 +0,0 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.5.5"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "piechart",
"name": "Pie chart v2",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "singlestat",
"name": "Singlestat",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Traefik dashboard prometheus",
"editable": true,
"gnetId": 4475,
"graphTooltip": 0,
"id": null,
"iteration": 1620932097756,
"links": [],
"panels": [
{
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 10,
"title": "$backend stats",
"type": "row"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 1
},
"id": 2,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value",
"percent"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "traefik_service_requests_total{service=\"$service\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"title": "$service return code",
"type": "piechart"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"format": "ms",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 1
},
"id": 4,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"exemplar": true,
"expr": "sum(traefik_service_request_duration_seconds_sum{service=\"$service\"}) / sum(traefik_service_requests_total{service=\"$service\"}) * 1000",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "",
"title": "$service response time",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "avg"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 3,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total{service=\"$service\"}[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "Total requests $service",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Total requests over 5min $service",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 12,
"panels": [],
"title": "Global stats",
"type": "row"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 16
},
"hiddenSeries": false,
"id": 5,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Status code 200 over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 16
},
"hiddenSeries": false,
"id": 6,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.5",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ method }} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Others status code over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ service }}",
"refId": "A"
}
],
"title": "Requests by service",
"type": "piechart"
},
{
"cacheTimeout": null,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"interval": null,
"links": [],
"maxDataPoints": 3,
"options": {
"displayLabels": [],
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"values": [
"value"
]
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {}
},
"targets": [
{
"exemplar": true,
"expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ entrypoint }}",
"refId": "A"
}
],
"title": "Requests by protocol",
"type": "piechart"
}
],
"schemaVersion": 27,
"style": "dark",
"tags": [
"traefik",
"prometheus"
],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "label_values(service)",
"description": null,
"error": null,
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(service)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "${DS_PROMETHEUS}",
"definition": "",
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "entrypoint",
"options": [],
"query": {
"query": "label_values(entrypoint)",
"refId": "Prometheus-entrypoint-Variable-Query"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Traefik",
"uid": "qPdAviJmz",
"version": 10
}


@ -1,27 +0,0 @@
resource "nomad_job" "exporters" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/exporters.nomad")
}
resource "nomad_job" "prometheus" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/prometheus.nomad")
}
resource "nomad_job" "grafana" {
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = path.module
})
depends_on = [nomad_job.prometheus]
}


@ -24,7 +24,8 @@ job "nomad-client-stalker" {
resources {
cpu = 10
memory = 10
memory = 15
memory_max = 30
}
}
}


@ -0,0 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.3.1"
hashes = [
"h1:lMueBNB2GJ/a5rweL9NPybwVfDH/Q1s+rQvt5Y+kuYs=",
"zh:1e7893a3fbebff171bcc5581b70a16eea33193c7e9dd73402ba5c04b7202f0bb",
"zh:252cfd3fee4811c83bc74406ba1bc1bbb83d6de20e50a86f93737f8f86864171",
"zh:387a7140be6dfa3f8d27f09d1eb2b9f3b84900328fe5a0478e9b3bd91a845808",
"zh:49848fa491ac26b0568b112a57d14cc49772607c7cf405e2f74dd537407214b1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7b9f345f5bb5f17c5d0bc3d373c25828934a3cbcdb331e0eab54eb47f1355fb2",
"zh:8e276f4de508a86e725fffc02ee891db73397c35dbd591d8918af427eeec93a1",
"zh:90b349933d2fd28f822a36128be4625bb816aa9f20ec314c79c77306f632ae87",
"zh:a0ca6fd6cd94a52684e432104d3dc170a74075f47d9d4ba725cc340a438ed75a",
"zh:a6cffc45535a0ff8206782538b3eeaef17dc93d0e1fd58bc1e6f7d5aa0f6ba1a",
"zh:c010807b5d3e03d769419787b0e5d4efa6963134e1873a413102af6bf3dd1c49",
"zh:faf962ee1981e897e99f7e528642c7e74beed37afd8eaf743e6ede24df812d80",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}

50
core/oidc_client/main.tf Normal file

@ -0,0 +1,50 @@
resource "random_password" "oidc_client_id" {
length = 72
override_special = "-._~"
}
resource "random_password" "oidc_secret" {
length = 72
override_special = "-._~"
}
resource "nomad_variable" "authelia_oidc_secret" {
path = "secrets/authelia/${var.name}"
items = {
client_id = resource.random_password.oidc_client_id.result
secret = resource.random_password.oidc_secret.result
secret_hash = resource.random_password.oidc_secret.bcrypt_hash
}
}
resource "nomad_variable" "authelia_access_control_oidc" {
path = "authelia/access_control/oidc_clients/${var.name}"
items = {
id = resource.random_password.oidc_client_id.result
description = var.oidc_client_config.description
authorization_policy = var.oidc_client_config.authorization_policy
redirect_uris = yamlencode(var.oidc_client_config.redirect_uris)
scopes = yamlencode(var.oidc_client_config.scopes)
}
}
resource "nomad_acl_policy" "oidc_authelia" {
count = var.job_acl != null ? 1 : 0
name = "${var.name}-authelia"
description = "Give access to shared authelia variables"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/authelia/${var.name}" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = var.job_acl.job_id
group = var.job_acl.group
task = var.job_acl.task
}
}


@ -0,0 +1,11 @@
output "client_id" {
value = resource.random_password.oidc_client_id.result
}
output "secret" {
value = resource.random_password.oidc_secret.result
}
output "secret_hash" {
value = resource.random_password.oidc_secret.bcrypt_hash
}

25
core/oidc_client/vars.tf Normal file

@ -0,0 +1,25 @@
variable "name" {
description = "Name of service"
type = string
}
variable "oidc_client_config" {
description = "Authelia oidc client configuration to enable oidc authentication"
type = object({
description = string
authorization_policy = optional(string, "one_factor")
redirect_uris = list(string)
scopes = list(string)
})
}
variable "job_acl" {
description = "Job ACL that should be given to the secrets"
type = object({
job_id = string
group = optional(string)
task = optional(string)
})
default = null
}
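
Since `job_acl` defaults to `null` and the policy resource is guarded by `count`, the module can also be used without attaching a job-scoped ACL policy. A hypothetical invocation from the core module (service name and redirect URI are made up):

module "example_oidc" {
  source = "./oidc_client"
  name   = "example"

  oidc_client_config = {
    description   = "Example service"
    redirect_uris = ["https://example.thefij.rocks/oauth/callback"]
    scopes        = ["openid", "groups", "email"]
  }

  # job_acl omitted: count = 0, so no nomad_acl_policy is created
}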


@ -8,12 +8,16 @@ job "prometheus" {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
to = 9090
}
port "pushgateway" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
static = 9091
}
}
@ -33,12 +37,36 @@ job "prometheus" {
"traefik.enable=true",
"traefik.http.routers.prometheus.entryPoints=websecure",
]
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
service {
name = "pushgateway"
provider = "nomad"
port = "pushgateway"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "3s"
check_restart {
limit = 3
grace = "5m"
}
}
}
task "prometheus" {
@ -48,8 +76,8 @@ job "prometheus" {
image = "prom/prometheus:v2.43.0"
ports = ["web"]
args = [
"--config.file=${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
"--config.file=$${NOMAD_TASK_DIR}/prometheus.yml",
"--storage.tsdb.path=$${NOMAD_ALLOC_DIR}/data/tsdb",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
@ -112,7 +140,7 @@ scrape_configs:
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "${NOMAD_TASK_DIR}/prometheus.yml"
destination = "$${NOMAD_TASK_DIR}/prometheus.yml"
}
resources {
@ -128,7 +156,7 @@ scrape_configs:
image = "prom/pushgateway"
ports = ["pushgateway"]
args = [
"--persistence.file=${NOMAD_ALLOC_DIR}/pushgateway-persistence",
"--persistence.file=$${NOMAD_ALLOC_DIR}/pushgateway-persistence",
]
}


@ -27,13 +27,11 @@ job "syslogng" {
driver = "docker"
meta = {
"diun.sort_tags" = "semver"
"diun.watch_repo" = true
"diun.include_tags" = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
}
config {
image = "grafana/promtail:2.7.1"
image = "grafana/promtail:2.9.1"
ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"]
@ -72,7 +70,7 @@ EOF
resources {
cpu = 50
memory = 20
memory = 50
}
}
}
@ -136,7 +134,7 @@ EOF
resources {
cpu = 50
memory = 10
memory = 50
}
}
}


@ -2,20 +2,20 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
version = "2.1.0"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"h1:ek0L7fA+4R1/BXhbutSRqlQPzSZ5aY/I2YfVehuYeEU=",
"zh:39ba4d4fc9557d4d2c1e4bf866cf63973359b73e908cce237c54384512bdb454",
"zh:40d2b66e3f3675e6b88000c145977c1d5288510c76b702c6c131d9168546c605",
"zh:40fbe575d85a083f96d4703c6b7334e9fc3e08e4f1d441de2b9513215184ebcc",
"zh:42ce6db79e2f94557fae516ee3f22e5271f0b556638eb45d5fbad02c99fc7af3",
"zh:4acf63dfb92f879b3767529e75764fef68886521b7effa13dd0323c38133ce88",
"zh:72cf35a13c2fb542cd3c8528826e2390db9b8f6f79ccb41532e009ad140a3269",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
"zh:8b8bcc136c05916234cb0c3bcc3d48fda7ca551a091ad8461ea4ab16fb6960a3",
"zh:8e1c2f924eae88afe7ac83775f000ae8fd71a04e06228edf7eddce4df2421169",
"zh:abc6e725531fc06a8e02e84946aaabc3453ecafbc1b7a442ea175db14fd9c86a",
"zh:b735fcd1fb20971df3e92f81bb6d73eef845dcc9d3d98e908faa3f40013f0f69",
"zh:ce59797282505d872903789db8f092861036da6ec3e73f6507dac725458a5ec9",
]
}


@ -1,9 +1,3 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
job "traefik" {
datacenters = ["dc1"]
type = "service"
@ -20,13 +14,15 @@ job "traefik" {
update {
max_parallel = 1
# canary = 1
# auto_promote = true
canary = 1
auto_promote = false
auto_revert = true
min_healthy_time = "30s"
healthy_deadline = "5m"
}
group "traefik" {
count = 1
count = 2
network {
port "web" {
@ -41,12 +37,17 @@ job "traefik" {
static = 514
}
port "gitssh" {
static = 2222
}
port "metrics" {}
dns {
servers = [
"192.168.2.101",
"192.168.2.102",
"192.168.2.30",
"192.168.2.170",
]
}
}
@ -56,39 +57,42 @@ job "traefik" {
sticky = true
}
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
port = "web"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
task "traefik" {
driver = "docker"
meta = {
"diun.sort_tags" = "semver"
"diun.watch_repo" = true
"diun.include_tags" = "^[0-9]+\\.[0-9]+$"
service {
name = "traefik"
provider = "nomad"
port = "web"
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
service {
name = "traefik-metrics"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape",
]
}
config {
image = "traefik:2.9"
image = "traefik:3.0"
ports = ["web", "websecure"]
ports = ["web", "websecure", "syslog", "gitssh", "metrics"]
network_mode = "host"
mount {
@ -102,6 +106,20 @@ job "traefik" {
target = "/etc/traefik/usersfile"
source = "secrets/usersfile"
}
mount {
type = "bind"
target = "/etc/traefik/certs"
source = "secrets/certs"
}
}
env = {
TRAEFIK_PROVIDERS_NOMAD_ENDPOINT_TOKEN = "${NOMAD_TOKEN}"
}
identity {
env = true
}
template {
@ -124,12 +142,9 @@ job "traefik" {
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure.http.tls]
certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"
[entryPoints.metrics]
address = ":8989"
address = ":<< env "NOMAD_PORT_metrics" >>"
[entryPoints.syslogtcp]
address = ":514"
@ -137,6 +152,9 @@ job "traefik" {
[entryPoints.syslogudp]
address = ":514/udp"
[entryPoints.gitssh]
address = ":2222"
[api]
dashboard = true
@ -156,31 +174,9 @@ job "traefik" {
exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
[providers.nomad.endpoint]
address = "http://<< env "attr.unique.network.ip-address" >>:4646"
<< if nomadVarExists "nomad/jobs/traefik" ->>
[certificatesResolvers.letsEncrypt.acme]
email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
# Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
delayBeforeCheck = 0
<<- end >>
address = "unix:///secrets/api.sock"
EOH
destination = "local/config/traefik.toml"
}
template {
data = <<EOH
{{ with nomadVar "nomad/jobs/traefik" -}}
CF_DNS_API_TOKEN={{ .domain_lego_dns }}
CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{- end }}
EOH
destination = "secrets/cloudflare.env"
env = true
destination = "${NOMAD_TASK_DIR}/config/traefik.toml"
}
template {
@ -191,23 +187,48 @@ CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
entryPoints = ["websecure"]
service = "nomad"
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
[http.routers.hass]
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.routers.{{ .name }}]
entryPoints = ["websecure"]
service = "hass"
rule = "Host(`hass.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
service = "{{ .name }}"
rule = "Host(`{{ .subdomain }}.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`){{ with .path_prefix.Value }}&&PathPrefix(`{{ . }}`){{ end }}"
{{ $name := .name -}}
{{ with .path_prefix.Value -}}
middlewares = ["{{ $name }}@file"]
{{ end }}
{{- end }}{{ end }}
#[http.middlewares]
# {{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path -}}
# {{ $name := .name -}}
# {{ with .path_prefix.Value -}}
# [http.middlewares.{{ $name }}.stripPrefix]
# prefixes = ["{{ . }}"]
# {{ end }}
# {{- end }}{{ end }}
[http.services]
[http.services.nomad]
[http.services.nomad.loadBalancer]
[[http.services.nomad.loadBalancer.servers]]
url = "http://127.0.0.1:4646"
[http.services.hass]
[http.services.hass.loadBalancer]
[[http.services.hass.loadBalancer.servers]]
url = "http://192.168.3.65:8123"
{{ range nomadVarList "traefik_external" }}{{ with nomadVar .Path }}
[http.services.{{ .name }}]
[http.services.{{ .name }}.loadBalancer]
[[http.services.{{ .name }}.loadBalancer.servers]]
url = "{{ .url }}"
{{- end }}{{ end }}
EOH
destination = "local/config/conf/route-hashi.toml"
destination = "${NOMAD_TASK_DIR}/config/conf/route-hashi.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
@ -243,7 +264,39 @@ CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{ end -}}
{{- end }}
EOH
destination = "local/config/conf/route-syslog-ng.toml"
destination = "${NOMAD_TASK_DIR}/config/conf/route-syslog-ng.toml"
change_mode = "noop"
splay = "1m"
wait {
min = "10s"
max = "20s"
}
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_crt" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.crt"
change_mode = "noop"
}
template {
data = <<EOF
{{- with nomadVar "secrets/certs/_lego/certificates/__thefij_rocks_key" }}{{ .contents }}{{ end -}}"
EOF
destination = "${NOMAD_SECRETS_DIR}/certs/_.thefij.rocks.key"
change_mode = "noop"
}
template {
data = <<EOH
[[tls.certificates]]
certFile = "/etc/traefik/certs/_.thefij.rocks.crt"
keyFile = "/etc/traefik/certs/_.thefij.rocks.key"
EOH
destination = "${NOMAD_TASK_DIR}/config/conf/dynamic-tls.toml"
change_mode = "noop"
}
@ -253,12 +306,11 @@ CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{ with nomadVar "nomad/jobs/traefik" }}
{{ if .usersfile }}
[http.middlewares.basic-auth.basicAuth]
# TODO: Reference secrets mount
usersFile = "/etc/traefik/usersfile"
{{- end }}
{{- end }}
EOH
destination = "local/config/conf/middlewares.toml"
destination = "${NOMAD_TASK_DIR}/config/conf/middlewares.toml"
change_mode = "noop"
}
@ -268,14 +320,13 @@ CF_ZONE_API_TOKEN={{ .domain_lego_dns }}
{{ .usersfile }}
{{- end }}
EOH
destination = "secrets/usersfile"
destination = "${NOMAD_SECRETS_DIR}/usersfile"
change_mode = "noop"
}
resources {
cpu = 100
memory = 100
memory_max = 500
memory = 150
}
}
}


@ -1,16 +1,81 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
"base_hostname" = var.base_hostname,
}
}
jobspec = file("${path.module}/traefik.nomad")
}
resource "nomad_acl_policy" "treafik_secrets_certs_read" {
name = "traefik-secrets-certs-read"
description = "Read certs to secrets store"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/certs/*" {
capabilities = ["read"]
}
path "secrets/certs" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "traefik_query_jobs" {
name = "traefik-query-jobs"
description = "Allow traefik to query jobs"
rules_hcl = <<EOH
namespace "default" {
capabilities = ["list-jobs", "read-job"]
}
EOH
job_acl {
job_id = resource.nomad_job.traefik.id
}
}
resource "nomad_acl_policy" "treafik_external" {
name = "traefik-exernal"
description = "Read external services"
rules_hcl = <<EOH
namespace "default" {
variables {
path "traefik_external/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
job_id = "traefik"
}
}
resource "nomad_variable" "traefik_external_hass" {
path = "traefik_external/hass"
items = {
name = "hass"
subdomain = "hass",
url = "http://192.168.3.65:8123"
}
}
resource "nomad_variable" "traefik_external_plex" {
path = "traefik_external/plex"
items = {
name = "plex"
subdomain = "plex",
url = "http://agnosticfront.thefij:32400"
}
}
resource "nomad_variable" "traefik_external_appdaemon" {
path = "traefik_external/appdaemon"
items = {
name = "appdaemon"
subdomain = "appdash",
url = "http://192.168.3.65:5050"
# path_prefix = "/add"
}
}
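
With the templated routers above, exposing another internal host is just a matter of adding one more `traefik_external/*` variable; no change to the Traefik job itself is needed. A hypothetical example (host, port, and subdomain are made up):

resource "nomad_variable" "traefik_external_octoprint" {
  path = "traefik_external/octoprint"
  items = {
    name      = "octoprint"
    subdomain = "octoprint"
    url       = "http://192.168.2.50:5000"
    # path_prefix would also be honored once the commented stripPrefix middleware is re-enabled
  }
}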


@ -3,3 +3,9 @@ variable "base_hostname" {
description = "Base hostname to serve content from"
default = "dev.homelab"
}
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}


@ -1,40 +1,40 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.0.0"
hashes = [
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
"h1:lIHIxA6ZmfyTGL3J9YIddhxlfit4ipSS09BLxkwo6L0=",
"zh:09b897d64db293f9a904a4a0849b11ec1e3fff5c638f734d82ae36d8dc044b72",
"zh:435cc106799290f64078ec24b6c59cb32b33784d609088638ed32c6d12121199",
"zh:7073444bd064e8c4ec115ca7d9d7f030cc56795c0a83c27f6668bba519e6849a",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79d238c35d650d2d83a439716182da63f3b2767e72e4cbd0b69cb13d9b1aebfc",
"zh:7ef5f49344278fe0bbc5447424e6aa5425ff1821d010d944a444d7fa2c751acf",
"zh:92179091638c8ba03feef371c4361a790190f9955caea1fa59de2055c701a251",
"zh:a8a34398851761368eb8e7c171f24e55efa6e9fdbb5c455f6dec34dc17f631bc",
"zh:b38fd5338625ebace5a4a94cea1a28b11bd91995d834e318f47587cfaf6ec599",
"zh:b71b273a2aca7ad5f1e07c767b25b5a888881ba9ca93b30044ccc39c2937f03c",
"zh:cd14357e520e0f09fb25badfb4f2ee37d7741afdc3ed47c7bcf54c1683772543",
"zh:e05e025f4bb95138c3c8a75c636e97cd7cfd2fc1525b0c8bd097db8c5f02df6e",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}


@ -3,17 +3,25 @@ job "lldap" {
type = "service"
priority = 80
update {
auto_revert = true
}
group "lldap" {
network {
mode = "bridge"
port "web" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "ldap" {
%{~ if use_wesher ~}
host_network = "wesher"
%{~ endif ~}
}
port "tls" {}
@ -46,47 +54,75 @@ job "lldap" {
driver = "docker"
config {
image = "nitnelave/lldap:latest"
image = "ghcr.io/lldap/lldap:v0.5"
ports = ["ldap", "web"]
args = ["run", "--config-file", "${NOMAD_SECRETS_DIR}/lldap_config.toml"]
args = ["run", "--config-file", "$${NOMAD_TASK_DIR}/lldap_config.toml"]
}
env = {
"LLDAP_VERBOSE" = "true"
"LLDAP_LDAP_PORT" = "${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "${NOMAD_PORT_web}"
"LLDAP_LDAP_PORT" = "$${NOMAD_PORT_ldap}"
"LLDAP_HTTP_PORT" = "$${NOMAD_PORT_web}"
"LLDAP_DATABASE_URL_FILE" = "$${NOMAD_SECRETS_DIR}/database_url.txt"
"LLDAP_KEY_SEED_FILE" = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
"LLDAP_JWT_SECRET_FILE" = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
"LLDAP_USER_PASS_FILE" = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
"LLDAP_SMTP_OPTIONS__PASSWORD_FILE" = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
}
template {
data = <<EOH
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .ldap_base_dn }}{{ end }}"
{{ with nomadVar "nomad/jobs/lldap" -}}
database_url = "mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}"
key_seed = "{{ .key_seed }}"
jwt_secret = "{{ .jwt_secret }}"
{{ with nomadVar "secrets/ldap" -}}
ldap_user_dn = "{{ .admin_user }}"
ldap_user_email = "{{ .admin_email }}"
ldap_user_pass = "{{ .admin_password }}"
{{ end -}}
{{ with nomadVar "nomad/jobs/lldap" -}}
[smtp_options]
from = "{{ .smtp_from }}"
reply_to = "{{ .smtp_reply_to }}"
enable_password_reset = true
{{- end }}
# TODO: Better access to SMTP creds using nomad ACLs
{{ with nomadVar "nomad/jobs" -}}
server = "{{ .smtp_server }}"
port = {{ .smtp_port }}
tls_required = {{ .smtp_tls.Value | toLower }}
user = "{{ .smtp_user }}"
password = "{{ .smtp_password }}"
{{ end -}}
{{ with nomadVar "secrets/smtp" -}}
server = "{{ .server }}"
port = {{ .port }}
tls_required = {{ .tls.Value | toLower }}
user = "{{ .user }}"
{{ end -}}
EOH
destination = "${NOMAD_SECRETS_DIR}/lldap_config.toml"
destination = "$${NOMAD_TASK_DIR}/lldap_config.toml"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}mysql://{{ .db_user }}:{{ .db_pass }}@127.0.0.1:3306/{{ .db_name }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/database_url.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .key_seed }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/key_seed.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"nomad/jobs/lldap\" }}{{ .jwt_secret }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/jwt_secret.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/ldap\" }}{{ .admin_password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/user_pass.txt"
change_mode = "restart"
}
template {
data = "{{ with nomadVar \"secrets/smtp\" }}{{ .password }}{{ end }}"
destination = "$${NOMAD_SECRETS_DIR}/smtp_password.txt"
change_mode = "restart"
}
@ -109,10 +145,10 @@ password = "{{ .smtp_password }}"
image = "mariadb:10"
args = [
"/usr/bin/timeout",
"2m",
"20m",
"/bin/bash",
"-c",
"until /usr/bin/mysql --defaults-extra-file=${NOMAD_SECRETS_DIR}/my.cnf < ${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
"until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done",
]
}
@ -122,12 +158,11 @@ password = "{{ .smtp_password }}"
host=127.0.0.1
port=3306
user=root
# TODO: Use via lesser scoped access
{{ with nomadVar "nomad/jobs/lldap/lldap/bootstrap" -}}
{{ with nomadVar "secrets/mysql" -}}
password={{ .mysql_root_password }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/my.cnf"
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
}
template {
@ -146,7 +181,7 @@ GRANT ALL ON `{{ .db_name }}`.*
SELECT 'NOOP';
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/bootstrap.sql"
destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql"
}
resources {
@ -164,9 +199,9 @@ SELECT 'NOOP';
}
config {
image = "alpine:3.17"
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
}
resources {
@ -174,15 +209,6 @@ SELECT 'NOOP';
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
@ -193,7 +219,7 @@ delay = yes
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:{{ env "NOMAD_PORT_ldap" }}
ciphers = PSK
PSKsecrets = {{ env "NOMAD_TASK_DIR" }}/stunnel_psk.txt
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
[mysql_client]
client = yes
@ -203,23 +229,23 @@ connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/lldap/lldap/stunnel" -}}
{{ .allowed_psks }}
{{- end }}
{{ range nomadVarList "secrets/ldap/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_TASK_DIR}/stunnel_psk.txt"
destination = "$${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
template {
data = <<EOF
{{- with nomadVar "nomad/jobs/lldap/lldap/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}}
{{- with nomadVar "secrets/mysql/allowed_psks/lldap" }}{{ .psk }}{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
}
}

123
databases/lldap.tf Normal file

@ -0,0 +1,123 @@
resource "nomad_job" "lldap" {
jobspec = templatefile("${path.module}/lldap.nomad", {
use_wesher = var.use_wesher,
})
depends_on = [resource.nomad_job.mysql-server]
# Block until deployed as there are services dependent on this one
detach = false
}
# Give access to ldap secrets
resource "nomad_acl_policy" "lldap_ldap_secrets" {
name = "lldap-secrets-ldap"
description = "Give access to LDAP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/ldap/*" {
capabilities = ["read"]
}
path "secrets/ldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "lldap_ldap_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_ldap_psk" {
path = "secrets/ldap/allowed_psks/ldap"
items = {
psk = "lldap:${resource.random_password.lldap_ldap_psk.result}"
}
}
# Give access to smtp secrets
resource "nomad_acl_policy" "lldap_smtp_secrets" {
name = "lldap-secrets-smtp"
description = "Give access to SMTP secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/smtp" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "lldap"
}
}
# Generate secrets and policies for access to MySQL
resource "nomad_acl_policy" "lldap_mysql_bootstrap_secrets" {
name = "lldap-secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "bootstrap"
}
}
resource "random_password" "lldap_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "lldap_mysql_psk" {
path = "secrets/mysql/allowed_psks/lldap"
items = {
psk = "lldap:${resource.random_password.lldap_mysql_psk.result}"
}
}
resource "nomad_acl_policy" "lldap_mysql_psk" {
name = "lldap-secrets-mysql-psk"
description = "Give access to MySQL PSK secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql/allowed_psks/lldap" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.lldap.id
job_id = "lldap"
group = "lldap"
task = "stunnel"
}
}


@ -1,46 +0,0 @@
resource "nomad_job" "mysql-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "postgres-server" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_job" "rediscommander" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/rediscommander.nomad")
}


@ -3,6 +3,10 @@ job "mysql-server" {
type = "service"
priority = 80
update {
auto_revert = true
}
group "mysql-server" {
count = 1
@ -73,7 +77,7 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
resources {
cpu = 300
memory = 1536
memory = 1600
}
}
@ -81,9 +85,9 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
driver = "docker"
config {
image = "alpine:3.17"
image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
}
resources {
@ -91,15 +95,6 @@ MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel ${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
@ -117,9 +112,9 @@ PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/mysql-server" -}}
{{ .allowed_psks }}
{{- end }}
{{ range nomadVarList "secrets/mysql/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}

41
databases/mysql.tf Normal file

@ -0,0 +1,41 @@
resource "nomad_job" "mysql-server" {
jobspec = file("${path.module}/mysql.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_mysql" {
name = "secrets-mysql"
description = "Give access to MySQL secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/mysql" {
capabilities = ["read"]
}
path "secrets/mysql/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.mysql-server.id
job_id = "mysql-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "mysql_mysql_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "mysql_mysql_psk" {
path = "secrets/mysql/allowed_psks/mysql"
items = {
psk = "mysql:${resource.random_password.mysql_mysql_psk.result}"
}
}


@ -3,6 +3,10 @@ job "postgres-server" {
type = "service"
priority = 80
update {
auto_revert = true
}
group "postgres-server" {
count = 1
@ -17,15 +21,16 @@ job "postgres-server" {
mode = "bridge"
port "db" {
to = 5432
host_network = "wesher"
static = 5432
}
port "tls" {}
}
volume "postgres-data" {
type = "host"
read_only = false
source = "mysql-data"
source = "postgres-data"
}
service {
@ -34,6 +39,12 @@ job "postgres-server" {
port = "db"
}
service {
name = "postgres-tls"
provider = "nomad"
port = "tls"
}
task "postgres-server" {
driver = "docker"
@ -44,7 +55,7 @@ job "postgres-server" {
volume_mount {
volume = "postgres-data"
destination = "/var/lib/postgresql"
destination = "/var/lib/postgresql/data"
read_only = false
}
@ -65,8 +76,48 @@ POSTGRES_PASSWORD={{ .superuser_pass }}
}
resources {
cpu = 300
memory = 256
cpu = 500
memory = 800
memory_max = 1500
}
}
task "stunnel" {
driver = "docker"
config {
image = "iamthefij/stunnel:1.0.0"
args = ["${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[postgres_server]
accept = {{ env "NOMAD_PORT_tls" }}
connect = 127.0.0.1:5432
ciphers = PSK
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ range nomadVarList "secrets/postgres/allowed_psks" -}}
{{ with nomadVar .Path }}{{ .psk }}{{ end }}
{{ end -}}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}

41
databases/postgres.tf Normal file

@ -0,0 +1,41 @@
resource "nomad_job" "postgres-server" {
jobspec = file("${path.module}/postgres.nomad")
# Block until deployed as there are services dependent on this one
detach = false
}
resource "nomad_acl_policy" "secrets_postgres" {
name = "secrets-postgres"
description = "Give access to Postgres secrets"
rules_hcl = <<EOH
namespace "default" {
variables {
path "secrets/postgres" {
capabilities = ["read"]
}
path "secrets/postgres/*" {
capabilities = ["read"]
}
}
}
EOH
job_acl {
# job_id = resource.nomad_job.postgres-server.id
job_id = "postgres-server"
}
}
# Create self-scoped psk so that config is valid at first start
resource "random_password" "postgres_postgres_psk" {
length = 32
override_special = "!@#%&*-_="
}
resource "nomad_variable" "postgres_postgres_psk" {
path = "secrets/postgres/allowed_psks/postgres"
items = {
psk = "postgres:${resource.random_password.postgres_postgres_psk.result}"
}
}


@ -3,6 +3,10 @@ job "redis-${name}" {
type = "service"
priority = 80
update {
auto_revert = true
}
group "cache" {
count = 1
@ -35,7 +39,7 @@ job "redis-${name}" {
resources {
cpu = 100
memory = 128
memory = 64
memory_max = 512
}
}
@ -44,23 +48,14 @@ job "redis-${name}" {
driver = "docker"
config {
image = "alpine:3.17"
image = "iamthefij/stunnel:1.0.0"
args = ["$${NOMAD_TASK_DIR}/stunnel.conf"]
ports = ["tls"]
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel $${NOMAD_TASK_DIR}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
cpu = 50
memory = 15
}
template {

12
databases/redis.tf Normal file

@ -0,0 +1,12 @@
resource "nomad_job" "redis" {
for_each = toset(["blocky", "authelia"])
jobspec = templatefile("${path.module}/redis.nomad",
{
name = each.key,
}
)
# Block until deployed as there are services dependent on this one
detach = false
}


@ -1,99 +0,0 @@
job "rediscommander" {
datacenters = ["dc1"]
type = "service"
group "rediscommander" {
count = 1
network {
mode = "bridge"
port "main" {
host_network = "wesher"
to = 8081
}
}
service {
name = "rediscommander"
provider = "nomad"
port = "main"
tags = [
"traefik.enable=true",
"traefik.http.routers.rediscommander.entryPoints=websecure",
]
}
task "rediscommander" {
driver = "docker"
config {
image = "rediscommander/redis-commander:latest"
ports = ["main"]
}
template {
data = <<EOH
REDIS_HOSTS=stunnel:127.0.0.1:6379
EOH
env = true
destination = "env"
}
resources {
cpu = 50
memory = 50
}
}
task "redis-stunnel" {
driver = "docker"
config {
image = "alpine:3.17"
args = ["/bin/sh", "${NOMAD_TASK_DIR}/start.sh"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "${NOMAD_TASK_DIR}/start.sh"
}
template {
data = <<EOF
syslog = no
foreground = yes
delay = yes
[redis_client]
client = yes
accept = 127.0.0.1:6379
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/stunnel_psk.txt
EOF
destination = "${NOMAD_TASK_DIR}/stunnel.conf"
}
template {
data = <<EOF
{{ with nomadVar "nomad/jobs/rediscommander" -}}
{{ .redis_stunnel_psk }}
{{- end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/stunnel_psk.txt"
}
}
}
}

5
databases/vars.tf Normal file

@ -0,0 +1,5 @@
variable "use_wesher" {
type = bool
description = "Indicates whether or not services should expose themselves on the wesher network"
default = true
}

34
main.tf Normal file

@ -0,0 +1,34 @@
module "databases" {
source = "./databases"
use_wesher = var.use_wesher
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}
module "services" {
source = "./services"
base_hostname = var.base_hostname
use_wesher = var.use_wesher
# NOTE: It may be possible to flip this and core so core templates don't
# need to be rerendered every time a service goes up or down.
depends_on = [module.databases, module.core]
}
module "backups" {
source = "./backups"
use_wesher = var.use_wesher
depends_on = [module.databases, module.services, module.core]
}
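
The root module passes `base_hostname` and `use_wesher` into each child module, so matching root-level variable declarations are implied. A sketch, mirroring the module-level defaults shown elsewhere in this diff:

variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}

variable "use_wesher" {
  type        = bool
  description = "Indicates whether or not services should expose themselves on the wesher network"
  default     = true
}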


@ -1,5 +1,5 @@
pre-commit
detect-secrets==1.4.0 # This should match what is in .pre-commit-config.yaml
ansible
python-consul
hvac
python-nomad
netaddr

128
scripts/nomad_backup_job.py Executable file

@ -0,0 +1,128 @@
#! /usr/bin/env python3
from os import environ
from time import sleep
from typing import Any
from typing import cast
from argparse import ArgumentParser
import requests
NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")
def nomad_req(
*path: str,
params: dict[str, Any] | None = None,
data: dict[str, Any] | None = None,
method="GET",
) -> list[dict[str, Any]] | dict[str, Any] | str:
headers = {
"Content-Type": "application/json",
}
if NOMAD_TOKEN:
headers["X-Nomad-Token"] = NOMAD_TOKEN
response = requests.request(
method,
f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
params=params,
json=data,
headers=headers,
)
try:
response.raise_for_status()
except requests.exceptions.RequestException as ex:
print(response.text)
raise ex
try:
return response.json()
except requests.exceptions.JSONDecodeError:
return response.text
def wait_for_job_alloc_status(job_id: str, status: str):
allocs = nomad_req("job", job_id, "allocations")
allocs = cast(list[dict[str, Any]], allocs)
while not all(alloc["ClientStatus"] == status for alloc in allocs):
print(f"Waiting for all allocs to reach {status}...")
sleep(5)
allocs = nomad_req("job", job_id, "allocations")
allocs = cast(list[dict[str, Any]], allocs)
def wait_for_eval_status(eval_id: str, status: str):
eval = nomad_req("evaluation", eval_id)
eval = cast(dict[str, Any], eval)
while eval["Status"] != status:
print(f"Waiting for eval to reach {status}...")
sleep(5)
eval = nomad_req("evaluation", eval_id)
eval = cast(dict[str, Any], eval)
parser = ArgumentParser(
description="Execute one off backups and restores of services",
)
parser.add_argument("service_name", help="Name of the service to backup or restore")
parser.add_argument("-a", "--action", default="backup", choices=("backup", "restore"), help="Action to take, backup or restore")
parser.add_argument("-s", "--snapshot", default="latest", help="Backup snapshot to restore, if restore is the chosen action")
parser.add_argument("-x", "--extra-safe", action="store_true", help="Perform extra safe backup or restore by stoping target job first")
args = parser.parse_args()
service_name = args.service_name
service_info = nomad_req("service", service_name, params={"choose": "1|backups"})
if not service_info:
print(f"Could not find service {service_name}")
exit(1)
service_info = cast(list[dict[str, Any]], service_info)
node_id = service_info[0]["NodeID"]
job_id = service_info[0]["JobID"]
node = nomad_req("node", node_id)
node = cast(dict[str, Any], node)
node_name = node["Name"]
backup_job_name = f"backup-oneoff-{node_name}"
backup_job = nomad_req("job", backup_job_name)
if not backup_job:
print(f"Could not find backup job {backup_job_name} for {service_name}")
if args.extra_safe:
print("Stopping job allocs")
stop_job = nomad_req("job", job_id, method="DELETE")
print(stop_job)
wait_for_job_alloc_status(job_id, "complete")
backup_job = cast(dict[str, Any], backup_job)
backup_job_id = backup_job["ID"]
dispatch = nomad_req(
"job",
backup_job_id,
"dispatch",
data={
"Payload": None,
"Meta": {
"job_name": service_name,
"task": args.action,
"snapshot": args.snapshot,
},
},
method="POST",
)
dispatch = cast(dict[str, Any], dispatch)
print(dispatch)
if args.extra_safe:
print(f"Wait for {args.action} to finish")
wait_for_eval_status(dispatch["EvalID"], "complete")
print("Backup complete. Verify success and restart job")
# If auto restarting, get versions and "revert" to version n-1 since n will be the recently stopped version


@ -0,0 +1,99 @@
#! /usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from typing import Any
from typing import cast

import requests

NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")


def nomad_req(
    *path: str, params: dict[str, Any] | None = None, method="GET"
) -> list[dict[str, Any]] | dict[str, Any] | str:
    headers = {}
    if NOMAD_TOKEN:
        headers["X-Nomad-Token"] = NOMAD_TOKEN

    response = requests.request(
        method,
        f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
        params=params,
        headers=headers,
    )
    response.raise_for_status()

    try:
        return response.json()
    except requests.exceptions.JSONDecodeError:
        return response.text


def extract_job_services(job: dict[str, Any]) -> dict[str, str]:
    services: dict[str, str] = dict()
    for group in job["TaskGroups"]:
        for service in group.get("Services") or []:
            services[service["Name"]] = group["Name"]
        for task in group["Tasks"]:
            for service in task.get("Services") or []:
                services[service["Name"]] = group["Name"]

    return services


exit_code = 0

parser = ArgumentParser(
    description="Checks for missing services and optionally restarts their allocs.",
)
parser.add_argument("-r", "--restart", action="store_true", help="Restart allocs for missing services")
args = parser.parse_args()

for job in nomad_req("jobs"):
    job = cast(dict[str, Any], job)

    if job["Type"] in ("batch", "sysbatch"):
        continue

    if job["Status"] != "running":
        print(f"WARNING: job {job['Name']} is {job['Status']}")
        continue

    job_detail = nomad_req("job", job["ID"])
    job_detail = cast(dict[str, Any], job_detail)

    expected_services = extract_job_services(job_detail)

    found_services: set[str] = set()
    for service in nomad_req("job", job_detail["ID"], "services"):
        service = cast(dict[str, Any], service)
        found_services.add(service["ServiceName"])

    missing_services = set(expected_services) - found_services
    restart_groups: set[str] = set()
    for missing_service in missing_services:
        print(f"ERROR: Missing service {missing_service} for job {job_detail['Name']}")
        # print(job)
        exit_code = 1

        # Add group associated with missing service to set
        restart_groups.add(expected_services[missing_service])

    if not restart_groups or not args.restart:
        continue

    # Get allocs for groups that are missing services
    restart_allocs: set[str] = set()
    for allocation in nomad_req("job", job_detail["ID"], "allocations"):
        allocation = cast(dict[str, Any], allocation)
        if allocation["ClientStatus"] == "running" and allocation["TaskGroup"] in restart_groups:
            restart_allocs.add(allocation["ID"])

    # Restart allocs associated with missing services
    for allocation in restart_allocs:
        print(f"INFO: Restarting allocation {allocation}")
        nomad_req("client", "allocation", allocation, "restart")

exit(exit_code)
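
For clarity, extract_job_services maps each service name to the task group that owns it, which is what the restart loop above uses to target only the affected groups. A small illustration with a made-up job payload (the names and structure below are hypothetical, shaped after the fields the function reads):

toy_job = {
    "TaskGroups": [
        {
            "Name": "app",
            "Services": [{"Name": "app-web"}],
            "Tasks": [
                {"Services": [{"Name": "app-metrics"}]},
                {"Services": None},
            ],
        }
    ]
}

# Both services resolve to their owning group, so if "app-metrics" went
# missing, the allocations of the "app" group would be flagged for restart.
assert extract_job_services(toy_job) == {"app-web": "app", "app-metrics": "app"}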


@ -0,0 +1,72 @@
#! /usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from typing import Any
from typing import cast

import requests

NOMAD_ADDR = environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = environ.get("NOMAD_TOKEN")


def nomad_req(
    *path: str, params: dict[str, Any] | None = None, method="GET"
) -> list[dict[str, Any]] | dict[str, Any] | str:
    headers = {}
    if NOMAD_TOKEN:
        headers["X-Nomad-Token"] = NOMAD_TOKEN

    response = requests.request(
        method,
        f"{NOMAD_ADDR}/v1/{'/'.join(path)}",
        params=params,
        headers=headers,
    )
    response.raise_for_status()

    try:
        return response.json()
    except requests.exceptions.JSONDecodeError:
        return response.text


exit_code = 0

parser = ArgumentParser(
    description="Checks for orphaned services and optionally deletes them.",
)
parser.add_argument("-d", "--delete", action="store_true", help="Delete orphan services")
args = parser.parse_args()

for namespace in nomad_req("services"):
    namespace = cast(dict[str, Any], namespace)
    for service in namespace["Services"]:
        service_name = service["ServiceName"]
        for service_instance in nomad_req("service", service_name):
            service_instance = cast(dict[str, Any], service_instance)
            service_id = service_instance["ID"]
            alloc_id = service_instance["AllocID"]

            alloc_found = True
            try:
                alloc = nomad_req("allocation", alloc_id)
                continue
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 404:
                    alloc_found = False
                    message = f"alloc {alloc_id} not found for {service_name}."
                    if args.delete:
                        message += f" Deleting {service_id}"
                    print(message)
                else:
                    raise e

            if not alloc_found and args.delete:
                nomad_req("service", service_name, service_id, method="DELETE")

exit(exit_code)
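
The rule encoded in the try/except above is that a registration whose AllocID no longer resolves (the allocation lookup returns 404) counts as orphaned. The same check, factored into a helper purely for readability; this is a restatement of the logic shown, reusing the script's nomad_req and its exception behaviour:

def is_orphaned(service_instance: dict[str, Any]) -> bool:
    """Return True when the registration's allocation no longer exists."""
    try:
        nomad_req("allocation", service_instance["AllocID"])
        return False
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            return True
        raise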


@ -1,5 +0,0 @@
module "services" {
source = "./services"
depends_on = [module.databases, module.core]
}


@ -2,20 +2,39 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
version = "2.1.1"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"h1:liQBgBXfQEYmwpoGZUfSsu0U0t/nhvuRZbMhaMz7wcQ=",
"zh:28bc6922e8a21334568410760150d9d413d7b190d60b5f0b4aab2f4ef52efeeb",
"zh:2d4283740e92ce1403875486cd5ff2c8acf9df28c190873ab4d769ce37db10c1",
"zh:457e16d70075eae714a7df249d3ba42c2176f19b6750650152c56f33959028d9",
"zh:49ee88371e355c00971eefee6b5001392431b47b3e760a5c649dda76f59fb8fa",
"zh:614ad3bf07155ed8a5ced41dafb09042afbd1868490a379654b3e970def8e33d",
"zh:75be7199d76987e7549e1f27439922973d1bf27353b44a593bfbbc2e3b9f698f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
"zh:888e14a24410d56b37212fbea373a3e0401d0ff8f8e4f4dd00ba8b29de9fed39",
"zh:aa261925e8b152636a0886b3a2149864707632d836e98a88dacea6cfb6302082",
"zh:ac10cefb4064b3bb63d4b0379624a416a45acf778eac0004466f726ead686196",
"zh:b1a3c8b4d5b2dc9b510eac5e9e02665582862c24eb819ab74f44d3d880246d4f",
"zh:c552e2fe5670b6d3ad9a5faf78e3a27197eeedbe2b13928d2c491fa509bc47c7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.0"
hashes = [
"h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=",
"zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
"zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
"zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
"zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
"zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
"zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
"zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
"zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
"zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
"zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}

services/adminer.tf (new file, 18 lines)

@ -0,0 +1,18 @@
module "adminer" {
source = "./service"
name = "adminer"
image = "adminer"
ingress = true
service_port = 8080
use_wesher = var.use_wesher
use_mysql = true
use_postgres = true
resources = {
cpu = 50
memory = 50
}
}


@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}


@ -1,28 +0,0 @@
resource "nomad_job" "backup" {
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = null,
})
}
resource "nomad_job" "backup-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
hcl2 {
enabled = true
}
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = path.module,
batch_node = each.key,
})
}


@ -1,35 +0,0 @@
job "nextcloud" {
schedule = "0 * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/nextcloud"
passphrase = env("BACKUP_PASSPHRASE")
}
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")
database = env("MYSQL_DATABASE")
username = env("MYSQL_USER")
password = env("MYSQL_PASSWORD")
no_tablespaces = true
dump_to = "/local/dump.sql"
}
backup {
paths = ["/data/nextcloud"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepHourly = 24
KeepDaily = 30
KeepWeekly = 8
KeepMonthly = 6
KeepYearly = 2
Prune = true
}
}
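
For context, a block like this is what the one-off dispatch script earlier in this diff selects by name: the Meta it posts chooses the job block, the task, and the snapshot. A hypothetical payload for a nextcloud restore, mirroring the fields that script builds (the values are illustrative examples, not taken from the repo):

dispatch_payload = {
    "Payload": None,
    "Meta": {
        "job_name": "nextcloud",  # matches the job "nextcloud" block above
        "task": "restore",        # one of the script's choices: backup or restore
        "snapshot": "latest",     # the script's default snapshot
    },
}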

Some files were not shown because too many files have changed in this diff.