WIP: Moving vars and service discovery to Nomad

Starting with core

parent 4f5f4e0fe6
commit 2d6fc3d9ef
.secrets-baseline

@@ -193,25 +193,7 @@
       "line_number": 252,
       "is_secret": false
     }
   ],
-  "core/syslogng.nomad": [
-    {
-      "type": "Base64 High Entropy String",
-      "filename": "core/syslogng.nomad",
-      "hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6",
-      "is_verified": false,
-      "line_number": 159,
-      "is_secret": false
-    },
-    {
-      "type": "Base64 High Entropy String",
-      "filename": "core/syslogng.nomad",
-      "hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4",
-      "is_verified": false,
-      "line_number": 159,
-      "is_secret": false
-    }
-  ]
   },
-  "generated_at": "2022-11-11T21:26:53Z"
+  "generated_at": "2022-11-21T00:23:03Z"
 }
.terraform.lock.hcl

@@ -1,78 +1,40 @@
 # This file is maintained automatically by "terraform init".
 # Manual edits may be lost in future updates.

-provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.14.0"
-  hashes = [
-    "h1:lJWOdlqevg6FQLFlfM3tGOsy9yPrjm9/vqkfzVrqT/A=",
-    "h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
-    "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
-    "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
-    "zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf",
-    "zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3",
-    "zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc",
-    "zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0",
-    "zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0",
-    "zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090",
-    "zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0",
-    "zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f",
-    "zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153",
-  ]
-}
-
 provider "registry.terraform.io/hashicorp/external" {
-  version = "2.2.2"
+  version = "2.2.3"
   hashes = [
     "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
     "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
     "zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
     "zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
     "zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
     "h1:uvOYRWcVIqOZSl8YjjaB18yZFz1AWIt2CnK7O45rckg=",
     "zh:184ecd339d764de845db0e5b8a9c87893dcd0c9d822167f73658f89d80ec31c9",
     "zh:2661eaca31d17d6bbb18a8f673bbfe3fe1b9b7326e60d0ceb302017003274e3c",
     "zh:2c0a180f6d1fc2ba6e03f7dfc5f73b617e45408681f75bca75aa82f3796df0e4",
     "zh:4b92ae44c6baef4c4952c47be00541055cb5280dd3bc8031dba5a1b2ee982387",
     "zh:5641694d5daf3893d7ea90be03b6fa575211a08814ffe70998d5adb8b59cdc0a",
     "zh:5bd55a2be8a1c20d732ac9c604b839e1cadc8c49006315dffa4d709b6874df32",
     "zh:6e0ef5d11e1597202424b7d69b9da7b881494c9b13a3d4026fc47012dc651c79",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
     "zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
     "zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
     "zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
     "zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
     "zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
     "zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
     "zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
     "zh:9e19f89fa25004d3b926a8d15ea630b4bde62f1fa4ed5e11a3d27aabddb77353",
     "zh:b763efdd69fd097616b4a4c89cf333b4cee9699ac6432d73d2756f8335d1213f",
     "zh:e3b561efdee510b2b445f76a52a902c52bee8e13095e7f4bed7c80f10f8d294a",
     "zh:fe660bb8781ee043a093b9a20e53069974475dcaa5791a1f45fd03c61a26478a",
   ]
 }

 provider "registry.terraform.io/hashicorp/nomad" {
-  version = "1.4.16"
+  version = "1.4.19"
   hashes = [
     "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
     "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
     "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
     "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
     "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
     "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
     "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
     "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
     "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
     "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
     "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
     "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
     "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
   ]
 }

 provider "registry.terraform.io/hashicorp/vault" {
   version = "3.3.1"
   hashes = [
     "h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
     "h1:i7EC2IF0KParI+JPA5ZtXJrAn3bAntW5gEMLvOXwpW4=",
     "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
     "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
     "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",
     "zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282",
     "zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce",
     "zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb",
     "zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929",
     "zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891",
     "zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861",
     "zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c",
     "zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941",
     "h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
     "zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
     "zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
     "zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
     "zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
     "zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
     "zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
     "zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
     "zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
     "zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
     "zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
     "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
   ]
 }
Makefile
@@ -39,10 +39,10 @@ secrets-update: $(VENV) .secrets-baseline

 ansible_galaxy: ansible_galaxy/ansible_collections ansible_galaxy/roles

 ansible_galaxy/ansible_collections: $(VENV) ./ansible_galaxy/requirements.yml
-	$(VENV)/bin/ansible-galaxy collection install -p ./ansible_galaxy -r ./ansible_collections/requirements.yml
+	$(VENV)/bin/ansible-galaxy collection install -p ./ansible_galaxy -r ./ansible_galaxy/requirements.yml

 ansible_galaxy/roles: $(VENV) ./ansible_galaxy/requirements.yml
-	$(VENV)/bin/ansible-galaxy install -p ./ansible_galaxy/roles -r ./ansible_roles/requirements.yml
+	$(VENV)/bin/ansible-galaxy install -p ./ansible_galaxy/roles -r ./ansible_galaxy/requirements.yml

 .PHONY: ansible-cluster
 ansible-cluster: $(VENV) ansible_galaxy
@@ -63,22 +63,22 @@
         state: restarted
       become: true

-- name: Start Vault
-  hosts: nomad_instances
-
-  tasks:
-    - name: Start Vault
-      systemd:
-        name: vault
-        state: started
-      become: true
-
-- name: Start Nomad
-  hosts: nomad_instances
-
-  tasks:
-    - name: Start Nomad
-      systemd:
-        name: nomad
-        state: started
-      become: true
+# - name: Start Vault
+#   hosts: nomad_instances
+#
+#   tasks:
+#     - name: Start Vault
+#       systemd:
+#         name: vault
+#         state: started
+#       become: true
+#
+# - name: Start Nomad
+#   hosts: nomad_instances
+#
+#   tasks:
+#     - name: Start Nomad
+#       systemd:
+#         name: nomad
+#         state: started
+#       become: true
@@ -1,148 +1,4 @@
 ---
-- name: Build Consul cluster
-  hosts: consul_instances
-  any_errors_fatal: true
-
-  roles:
-    - role: ansible-consul
-      vars:
-        consul_version: "1.13.3-1"
-        consul_install_upgrade: true
-        consul_install_from_repo: true
-        consul_os_repo_prerequisites: []
-
-        consul_node_role: server
-        consul_raft_protocol: 3
-        consul_bootstrap_expect: true
-        consul_bootstrap_expect_value: "{{ [(play_hosts | length), 3] | min }}"
-
-        consul_user: consul
-        consul_manage_user: true
-        consul_group: bin
-        consul_manage_group: true
-
-        # consul_tls_enable: true
-        consul_connect_enabled: true
-        consul_ports_grpc: 8502
-        consul_client_address: "0.0.0.0"
-
-        # Autopilot
-        consul_autopilot_enable: true
-        consul_autopilot_cleanup_dead_Servers: true
-
-        # Enable metrics
-        consul_config_custom:
-          telemetry:
-            prometheus_retention_time: "2h"
-
-        # DNS forwarding
-        consul_dnsmasq_enable: true
-        consul_dnsmasq_servers:
-          # TODO: use addresses of other nomad nodes?
-          # Maybe this can be [] to get the values from dhcp
-          - 1.1.1.1
-          - 1.0.0.1
-        consul_dnsmasq_bind_interfaces: true
-        consul_dnsmasq_listen_addresses:
-          # Listen only to loopback interface
-          - 127.0.0.1
-
-  become: true
-
-  tasks:
-    - name: Start Consul
-      systemd:
-        state: started
-        name: consul
-      become: true
-
-    # If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something
-    # pointing to 127.0.0.1 and possibly restart Docker and Nomad
-    # Actually, we should point to our external Nomad address so that Docker uses it
-    - name: Update resolv.conf
-      lineinfile:
-        dest: /etc/resolv.conf
-        create: true
-        line: "nameserver {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
-      become: true
-
-- name: Setup Vault cluster
-  hosts: vault_instances
-
-  roles:
-    - name: ansible-vault
-      vars:
-        vault_version: 1.12.0-1
-        vault_install_hashi_repo: true
-        vault_harden_file_perms: true
-        # Maybe this should be restricted
-        vault_group: bin
-        vault_bin_path: /usr/bin
-        vault_address: 0.0.0.0
-
-        vault_backend: consul
-  become: true
-
-  tasks:
-    - name: Get Vault status
-      uri:
-        url: http://127.0.0.1:8200/v1/sys/health
-        method: GET
-        status_code: 200, 429, 472, 473, 501, 503
-        body_format: json
-        return_content: true
-      register: vault_status
-
-    - name: Initialize Vault
-      when: not vault_status.json["initialized"]
-      block:
-        - name: Initialize Vault
-          command:
-            argv:
-              - "vault"
-              - "operator"
-              - "init"
-              - "-format=json"
-              - "-address=http://127.0.0.1:8200/"
-              - "-key-shares={{ vault_init_key_shares|default(3) }}"
-              - "-key-threshold={{ vault_init_key_threshold|default(2) }}"
-          run_once: true
-          register: vault_init
-
-        - name: Save initialize result
-          copy:
-            content: "{{ vault_init.stdout }}"
-            dest: "../vault-keys.json"
-          when: vault_init is succeeded
-          delegate_to: localhost
-          run_once: true
-
-        - name: Unseal from init
-          no_log: true
-          command:
-            argv:
-              - "vault"
-              - "operator"
-              - "unseal"
-              - "-address=http://127.0.0.1:8200/"
-              - "{{ item }}"
-          loop: "{{ (vault_init.stdout | from_json)['unseal_keys_hex'] }}"
-          when: vault_init is succeeded
-
-    - name: Unseal Vault
-      no_log: true
-      command:
-        argv:
-          - "vault"
-          - "operator"
-          - "unseal"
-          - "-address=http://127.0.0.1:8200/"
-          - "{{ item }}"
-      loop: "{{ unseal_keys_hex }}"
-      when:
-        - unseal_keys_hex is defined
-        - vault_status.json["sealed"]
-
 - name: Install Docker
   hosts: nomad_instances
   become: true
@@ -323,8 +179,8 @@
             enabled = true
             selinuxlabel = "z"
             # Send logs to journald so we can scrape them for Loki
-            logging:
-              type: journald
+            # logging:
+            #   type: journald
             extra_labels:
               - "job_name"
               - "job_id"
@@ -352,35 +208,9 @@
         # Enable ACLs
         nomad_acl_enabled: true

-        # Enable vault integration
-        # HACK: Only talk to local Vault for now because it doesn't have HTTPS
-        # TODO: Would be really great to have this over https and point to vault.consul.service
-        # nomad_vault_address: "https://vault.service.consul:8200"
-        # Right now, each node only talks to it's local Vault, so if that node is rebooted and
-        # that vault is sealed, it will not have access to vault. This is a problem if a node
-        # must reboot.
-        nomad_vault_address: "http://127.0.0.1:8200"
-        # TODO: This fails on first run because the Nomad-Vault integration can't be set up
-        # until Nomad has started. Could maybe figure out if ACLs have been set up and leave
-        # these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
-        # befor Nomad is set up
-        nomad_vault_create_from_role: "nomad-cluster"
-        # TODO: (security) Probably want to restict this to a narrower scoped token
-        nomad_vault_enabled: "{{ root_token is defined }}"
-        nomad_vault_token: "{{ root_token | default('') }}"
-
         nomad_config_custom:
           ui:
             enabled: true
-            consul:
-              ui_url: "https://consul.thefij.rocks/ui"
-            vault:
-              ui_url: "https://vault.thefij.rocks/ui"
-          consul:
-            tags:
-              - "traefik.enable=true"
-              - "traefik.consulcatalog.connect=true"
-              - "traefik.http.routers.nomadclient.entrypoints=websecure"

 - name: Bootstrap Nomad ACLs and scheduler
   hosts: nomad_instances
@@ -435,20 +265,6 @@
       changed_when: false
       register: read_secretid

-    - name: Enable service scheduler preemption
-      command:
-        argv:
-          - nomad
-          - operator
-          - scheduler
-          - set-config
-          - -preempt-system-scheduler=true
-          - -preempt-service-scheduler=true
-      environment:
-        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      delegate_to: "{{ play_hosts[0] }}"
-      run_once: true
-
     - name: Look for policy
       command:
         argv:
@@ -465,8 +281,6 @@
       copy:
         src: ../acls/nomad-anon-policy.hcl
         dest: /tmp/anonymous.policy.hcl
-      delegate_to: "{{ play_hosts[0] }}"
       register: anon_policy
       run_once: true

     - name: Create anon-policy
@@ -485,18 +299,32 @@
       delegate_to: "{{ play_hosts[0] }}"
       run_once: true

-    - name: Set up Nomad backend and roles in Vault
-      community.general.terraform:
-        project_path: ../acls
-        force_init: true
-        variables:
-          consul_address: "{{ play_hosts[0] }}:8500"
-          vault_token: "{{ root_token }}"
-          nomad_secret_id: "{{ read_secretid.stdout }}"
-      delegate_to: localhost
+    - name: Enable service scheduler preemption
+      command:
+        argv:
+          - nomad
+          - operator
+          - scheduler
+          - set-config
+          - -preempt-system-scheduler=true
+          - -preempt-service-scheduler=true
+      environment:
+        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
+      delegate_to: "{{ play_hosts[0] }}"
       run_once: true
+      notify:
+        - Restart Nomad
+
+    # - name: Set up Nomad backend and roles in Vault
+    #   community.general.terraform:
+    #     project_path: ../acls
+    #     force_init: true
+    #     variables:
+    #       consul_address: "{{ play_hosts[0] }}:8500"
+    #       vault_token: "{{ root_token }}"
+    #       nomad_secret_id: "{{ read_secretid.stdout }}"
+    #   delegate_to: localhost
+    #   run_once: true
+    #   notify:
+    #     - Restart Nomad

   handlers:
     - name: Restart Nomad
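The play above enables scheduler preemption through the `nomad operator scheduler set-config` CLI. The same setting is also exposed over Nomad's operator HTTP API; below is a minimal sketch of the equivalent call. This is an assumption-laden illustration: the endpoint and the `SchedulerConfig`/`PreemptionConfig` field names come from Nomad's documented scheduler configuration API, not from this commit.

```python
# Hedged sketch: toggle scheduler preemption via Nomad's operator API,
# mirroring the "nomad operator scheduler set-config" task above.
# Assumes Nomad's documented /v1/operator/scheduler/configuration endpoint.
from os import getenv

import requests

NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = getenv("NOMAD_TOKEN")  # management token, as in the play

headers = {"X-Nomad-Token": NOMAD_TOKEN} if NOMAD_TOKEN else {}
url = f"{NOMAD_ADDR}/v1/operator/scheduler/configuration"

# Read-modify-write so other scheduler settings are preserved.
config = requests.get(url, headers=headers).json()["SchedulerConfig"]
config["PreemptionConfig"]["SystemSchedulerEnabled"] = True
config["PreemptionConfig"]["ServiceSchedulerEnabled"] = True
requests.post(url, headers=headers, json=config).raise_for_status()
```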
@ -37,11 +37,13 @@ job "blocky" {
|
||||
|
||||
service {
|
||||
name = "blocky-dns"
|
||||
provider = "nomad"
|
||||
port = "dns"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "blocky-api"
|
||||
provider = "nomad"
|
||||
port = "api"
|
||||
|
||||
meta {
|
||||
@ -53,41 +55,6 @@ job "blocky" {
|
||||
"traefik.http.routers.blocky-api.entryPoints=websecure",
|
||||
]
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 4000
|
||||
|
||||
expose {
|
||||
path {
|
||||
path = "/metrics"
|
||||
protocol = "http"
|
||||
local_path_port = 4000
|
||||
listener_port = "api"
|
||||
}
|
||||
}
|
||||
|
||||
upstreams {
|
||||
destination_name = "redis"
|
||||
local_bind_port = 6379
|
||||
}
|
||||
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
local_bind_port = 4040
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
memory_max = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check {
|
||||
name = "api-health"
|
||||
port = "api"
|
||||
@ -118,13 +85,6 @@ job "blocky" {
|
||||
memory_max = 100
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = var.config_data
|
||||
destination = "app/config.yml"
|
||||
@ -149,21 +109,16 @@ job "blocky" {
|
||||
]
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
[client]
|
||||
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
|
||||
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
|
||||
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
|
||||
host={{ .Address }}
|
||||
port={{ .Port }}
|
||||
{{ end -}}
|
||||
user=root
|
||||
{{ with secret "kv/data/mysql" }}
|
||||
password={{ .Data.data.root_password }}
|
||||
{{ with nomadVar "nomad/jobs" }}
|
||||
password={{ .mysql_root_password }}
|
||||
{{ end }}
|
||||
EOF
|
||||
destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
|
||||
@ -171,16 +126,16 @@ password={{ .Data.data.root_password }}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
{{ with secret "kv/data/blocky" -}}
|
||||
{{ if .Data.data.db_name -}}
|
||||
{{ $db_name := .Data.data.db_name }}
|
||||
{{ with nomadVar "nomad/jobs/blocky" -}}
|
||||
{{ if .db_name -}}
|
||||
{{ $db_name := .db_name }}
|
||||
CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
|
||||
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
|
||||
GRANT ALL ON `{{ $db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
|
||||
{{ with secret "kv/data/grafana" -}}
|
||||
CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
|
||||
GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
|
||||
{{ with nomadVar "nomad/jobs" -}}
|
||||
-- Add grafana read_only user
|
||||
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user_ro }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass_ro }}';
|
||||
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .Data.data.db_user_ro }}'@'%';
|
||||
CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
|
||||
GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
|
||||
{{ end -}}
|
||||
{{ else -}}
|
||||
SELECT 'NOOP';
|
||||
|
@@ -25,7 +25,8 @@ upstream:
   conditional:
     fallbackUpstream: false
     mapping:
-      consul: {{ env "attr.unique.network.ip-address" }}:8600
+      # TODO: Run a simple dns server that this can forward to where its hosts are set by nomad-services
+      # consul: {{ env "attr.unique.network.ip-address" }}:8600
       home.arpa: 192.168.2.1
       in-addr.arpa: 192.168.2.1
       iot: 192.168.2.1
@@ -52,7 +53,9 @@ blocking:
   whiteLists:
     # Move to Gitea when deployed internally
     ads:
-      {{ keyOrDefault "blocky/whitelists/ads" "# None" | indent 6 }}
+      {{ with nomadVar "nomad/jobs/blocky" -}}
+      {{ .whitelists_ads | indent 6 }}
+      {{- end }}
   clientGroupsBlock:
     default:
       - ads
@@ -62,36 +65,39 @@ blocking:
 customDNS:
   customTTL: 1h
   mapping:
-    {{ with service "traefik" -}}
+    {{ with nomadService "traefik" -}}
     {{- $last := len . | subtract 1 -}}
     {{- $services := . -}}
-    {{ keyOrDefault "global/base_hostname" "${base_hostname}" }}: {{ range $i := loop $last -}}
+    {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}: {{ range $i := loop $last -}}
     {{- with index $services $i }}{{ .Address }},{{ end -}}
     {{- end -}}
     {{- with index . $last }}{{ .Address }}{{ end -}}
     {{- end }}
   # Other mappings
-  {{ keyOrDefault "blocky/mappings" "# None" | indent 4 }}
+  {{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings | indent 4 }}{{ end }}

 prometheus:
   enable: true

-{{ with service "redis" -}}
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
 redis:
-  address: {{ env "NOMAD_UPSTREAM_ADDR_redis" }}
+  address: {{ .Address }}:{{ .Port }}
   # password: ""
   # database: 0
   connectionAttempts: 10
   connectionCooldown: 3s
 {{ end -}}

-{{ with service "vault" -}}{{ with service "mysql-server" -}}
-{{ with secret "kv/data/blocky" -}}
+{{ $mysql_addr := "" }}
+{{ with nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}{{ range . -}}
+{{ $mysql_addr = print .Address ":" .Port }}
+{{- end }}{{- end }}
+{{ with nomadVar "nomad/jobs/blocky" -}}
 queryLog:
   type: mysql
-  target: {{ .Data.data.db_user }}:{{ .Data.data.db_pass }}@tcp({{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }})/{{ .Data.data.db_name }}?charset=utf8mb4&parseTime=True&loc=Local
+  target: {{ .db_user }}:{{ .db_pass }}@tcp({{ $mysql_addr }})/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
   logRetentionDays: 14
-{{ end -}}{{ end -}}{{ end -}}
+{{ end -}}

 port: 53
 httpPort: 4000
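The template changes above swap Consul-backed lookups (`service`, `keyOrDefault`, `secret`) for Nomad-native `nomadService` and `nomadVar`. The data behind `nomadService` is Nomad's own service registrations, which can also be inspected directly; the sketch below is an illustration under assumptions, not part of the commit: it presumes Nomad 1.3+ and its documented `GET /v1/service/:service_name` endpoint.

```python
# Hedged sketch: list Nomad-native service registrations -- the same data the
# nomadService template function renders. Assumes Nomad 1.3+ and NOMAD_ADDR.
from os import getenv

import requests

NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")


def service_addresses(name: str) -> list[str]:
    result = requests.get(f"{NOMAD_ADDR}/v1/service/{name}")
    result.raise_for_status()
    # Each registration carries the Address/Port that the templates interpolate.
    return [f'{reg["Address"]}:{reg["Port"]}' for reg in result.json()]


if __name__ == "__main__":
    # e.g. one of the services this commit registers with provider = "nomad"
    print(service_addresses("mysql-server"))
```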
@ -16,30 +16,23 @@ job "ddclient" {
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with nomadVar "nomad/jobs/ddclient" -}}
|
||||
daemon=900
|
||||
ssl=yes
|
||||
use=web
|
||||
|
||||
protocol=cloudflare,
|
||||
zone={{ key "ddclient/zone" }},
|
||||
zone={{ .zone }},
|
||||
ttl=1,
|
||||
{{ with secret "kv/data/cloudflare" -}}
|
||||
login={{ .Data.data.api_user }},
|
||||
password={{ .Data.data.api_key }}
|
||||
login={{ .cloudflare_api_user }},
|
||||
password={{ .cloudflare_api_key }}
|
||||
# login=token,
|
||||
# password={{ .Data.data.api_token_dns_edit_all }}
|
||||
{{ end -}}
|
||||
# password={{ .cloudflare_api_token_dns_edit_all }}
|
||||
|
||||
{{ key "ddclient/domain" }}
|
||||
{{ .domain }}
|
||||
{{- end }}
|
||||
EOH
|
||||
destination = "secrets/ddclient.conf"
|
||||
change_mode = "restart"
|
||||
|
@ -8,12 +8,10 @@ job "lldap" {
|
||||
mode = "bridge"
|
||||
|
||||
port "web" {
|
||||
host_network = "loopback"
|
||||
to = 17170
|
||||
}
|
||||
|
||||
port "ldap" {
|
||||
host_network = "loopback"
|
||||
to = 3890
|
||||
}
|
||||
}
|
||||
@ -26,47 +24,15 @@ job "lldap" {
|
||||
|
||||
service {
|
||||
name = "lldap"
|
||||
provider = "nomad"
|
||||
port = "ldap"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 3890
|
||||
|
||||
config {
|
||||
protocol = "tcp"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "ldap-admin"
|
||||
provider = "nomad"
|
||||
port = "web"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 17170
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 20
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.ldap-admin.entryPoints=websecure",
|
||||
@ -94,13 +60,6 @@ job "lldap" {
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
env = {
|
||||
"LLDAP_LDAP_PORT" = "${NOMAD_PORT_ldap}"
|
||||
"LLDAP_HTTP_PORT" = "${NOMAD_PORT_web}"
|
||||
@ -110,25 +69,25 @@ job "lldap" {
|
||||
data = <<EOH
|
||||
database_url = "sqlite:///data/users.db?mode=rwc"
|
||||
key_file = "/data/private_key"
|
||||
ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
|
||||
{{ with secret "kv/data/lldap" -}}
|
||||
jwt_secret = "{{ .Data.data.jwt_secret }}"
|
||||
ldap_user_dn = "{{ .Data.data.admin_user }}"
|
||||
ldap_user_email = "{{ .Data.data.admin_email }}"
|
||||
ldap_user_pass = "{{ .Data.data.admin_password }}"
|
||||
ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .base_dn }}{{ end }}"
|
||||
{{ with nomadVar "nomad/jobs/lldap" }}
|
||||
jwt_secret = "{{ .jwt_secret }}"
|
||||
ldap_user_dn = "{{ .admin_user }}"
|
||||
ldap_user_email = "{{ .admin_email }}"
|
||||
ldap_user_pass = "{{ .admin_password }}"
|
||||
{{ end -}}
|
||||
{{ with secret "kv/data/smtp" -}}
|
||||
{{ with nomadVar "nomad/jobs" -}}
|
||||
[smtp_options]
|
||||
enable_password_reset = true
|
||||
server = "{{ .Data.data.server }}"
|
||||
port = {{ .Data.data.port }}
|
||||
tls_required = {{ .Data.data.tls }}
|
||||
user = "{{ .Data.data.user }}"
|
||||
password = "{{ .Data.data.password }}"
|
||||
{{ with secret "kv/data/lldap" -}}
|
||||
from = "{{ .Data.data.smtp_from }}"
|
||||
reply_to = "{{ .Data.data.smtp_reply_to }}"
|
||||
server = "{{ .smtp_server }}"
|
||||
port = {{ .smtp_port }}
|
||||
tls_required = {{ .smtp_tls }}
|
||||
user = "{{ .smtp_user }}"
|
||||
password = "{{ .smtp_password }}"
|
||||
{{ end -}}
|
||||
{{ with nomadVar "nomad/jobs/lldap" -}}
|
||||
from = "{{ .smtp_from }}"
|
||||
reply_to = "{{ .smtp_reply_to }}"
|
||||
{{ end -}}
|
||||
EOH
|
||||
destination = "secrets/lldap_config.toml"
|
||||
|
core/main.tf
@@ -1,4 +1,3 @@
-
 module "blocky" {
   source = "./blocky"

@@ -30,11 +29,11 @@ module "nomad_login" {
   }
 }

-module "metrics" {
-  source = "./metrics"
-  # Not in this module
-  # depends_on = [module.databases]
-}
+# module "metrics" {
+#   source = "./metrics"
+#   # Not in this module
+#   # depends_on = [module.databases]
+# }

 module "loki" {
   source = "IamTheFij/levant/nomad"
@@ -57,34 +56,6 @@ module "loki" {
   }
 }

-resource "consul_config_entry" "loki_intent" {
-  name = "loki"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action = "allow"
-        Name = "grafana"
-        Precedence = 9
-        Type = "consul"
-      },
-      {
-        Action = "allow"
-        Name = "promtail"
-        Precedence = 9
-        Type = "consul"
-      },
-      {
-        Action = "allow"
-        Name = "syslogng-promtail"
-        Precedence = 9
-        Type = "consul"
-      },
-    ]
-  })
-}
-
 resource "nomad_job" "syslog-ng" {
   jobspec = file("${path.module}/syslogng.nomad")
 }
@@ -96,41 +67,3 @@ resource "nomad_job" "ddclient" {
 resource "nomad_job" "lldap" {
   jobspec = file("${path.module}/lldap.nomad")
 }
-
-resource "consul_config_entry" "syslogng_promtail_intent" {
-  name = "syslogng-promtail"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action = "allow"
-        Name = "syslogng"
-        Precedence = 9
-        Type = "consul"
-      },
-    ]
-  })
-}
-
-resource "consul_config_entry" "global_access" {
-  name = "*"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action = "allow"
-        Name = "traefik"
-        Precedence = 6
-        Type = "consul"
-      },
-      {
-        Action = "deny"
-        Name = "*"
-        Precedence = 5
-        Type = "consul"
-      },
-    ]
-  })
-}
@ -19,28 +19,8 @@ job "syslogng" {
|
||||
|
||||
service {
|
||||
name = "syslogng-promtail"
|
||||
provider = "nomad"
|
||||
port = "main"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 1514
|
||||
|
||||
upstreams {
|
||||
destination_name = "loki"
|
||||
local_bind_port = 1000
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
memory_max = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task "promtail" {
|
||||
@ -66,7 +46,9 @@ server:
|
||||
http_listen_port: 9080
|
||||
|
||||
clients:
|
||||
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
|
||||
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
|
||||
- url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
|
||||
{{- end }}
|
||||
|
||||
scrape_configs:
|
||||
# TCP syslog receiver
|
||||
@ -155,8 +137,9 @@ source s_internal {
|
||||
};
|
||||
|
||||
destination d_loki {
|
||||
# Forward to Connect proxy to Promtail
|
||||
syslog("{{ env "NOMAD_UPSTREAM_IP_syslogng-promtail" }}" transport("tcp") port({{ env "NOMAD_UPSTREAM_PORT_syslogng-promtail" }}));
|
||||
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "syslogng-promtail" -}}
|
||||
syslog("{{ .Address }}" transport("tcp") port({{ .Port }}));
|
||||
{{- end }}
|
||||
};
|
||||
|
||||
log { source(s_internal); destination(d_loki); };
|
||||
|
@ -49,6 +49,7 @@ job "traefik" {
|
||||
|
||||
service {
|
||||
name = "traefik"
|
||||
provider = "nomad"
|
||||
port = "web"
|
||||
|
||||
check {
|
||||
@ -59,10 +60,6 @@ job "traefik" {
|
||||
timeout = "2s"
|
||||
}
|
||||
|
||||
connect {
|
||||
native = true
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.traefik.entryPoints=websecure",
|
||||
@ -92,10 +89,6 @@ job "traefik" {
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = ["access-tables", "nomad-task"]
|
||||
}
|
||||
|
||||
template {
|
||||
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
|
||||
left_delimiter = "<<"
|
||||
@ -116,11 +109,9 @@ job "traefik" {
|
||||
[entryPoints.websecure]
|
||||
address = ":443"
|
||||
[entryPoints.websecure.http.tls]
|
||||
<< if keyExists "traefik/acme/email" ->>
|
||||
certResolver = "letsEncrypt"
|
||||
[[entryPoints.websecure.http.tls.domains]]
|
||||
main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
|
||||
<< end ->>
|
||||
main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"
|
||||
|
||||
[entryPoints.metrics]
|
||||
address = ":8989"
|
||||
@ -146,34 +137,30 @@ job "traefik" {
|
||||
directory = "/etc/traefik/conf"
|
||||
watch = true
|
||||
|
||||
[providers.consulCatalog]
|
||||
connectAware = true
|
||||
connectByDefault = true
|
||||
[providers.nomad]
|
||||
exposedByDefault = false
|
||||
defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
|
||||
[providers.consulCatalog.endpoint]
|
||||
address = "http://<< env "CONSUL_HTTP_ADDR" >>"
|
||||
defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
|
||||
[providers.nomad.endpoint]
|
||||
address = "http://<< env "attr.unique.network.ip-address" >>:4646"
|
||||
|
||||
<< if keyExists "traefik/acme/email" ->>
|
||||
[certificatesResolvers.letsEncrypt.acme]
|
||||
email = "<< key "traefik/acme/email" >>"
|
||||
email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
|
||||
# Store in /local because /secrets doesn't persist with ephemeral disk
|
||||
storage = "/local/acme.json"
|
||||
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
|
||||
provider = "cloudflare"
|
||||
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
|
||||
delayBeforeCheck = 0
|
||||
<< end ->>
|
||||
EOH
|
||||
destination = "local/config/traefik.toml"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "kv/data/cloudflare" }}
|
||||
CF_DNS_API_TOKEN={{ .Data.data.api_token_dns_edit }}
|
||||
CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
{{ end }}
|
||||
{{ with nomadVar "nomad/jobs/traefik" -}}
|
||||
CF_DNS_API_TOKEN={{ .cloudflare_api_token_dns_edit }}
|
||||
CF_ZONE_API_TOKEN={{ .cloudflare_api_token_zone_read }}
|
||||
{{- end }}
|
||||
EOH
|
||||
destination = "secrets/cloudflare.env"
|
||||
env = true
|
||||
@ -185,46 +172,17 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
[http.routers]
|
||||
[http.routers.nomad]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "nomad"
|
||||
rule = "Host(`nomad.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
[http.routers.consul]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "consul"
|
||||
rule = "Host(`consul.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
[http.routers.vault]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "vault"
|
||||
rule = "Host(`vault.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"
|
||||
|
||||
[http.services]
|
||||
{{ with service "nomad-client" -}}
|
||||
{{ with nomadService "nomad-client" -}}
|
||||
[http.services.nomad]
|
||||
[http.services.nomad.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[http.services.nomad.loadBalancer.servers]]
|
||||
url = "http://{{ .Address }}:{{ .Port }}"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{ with service "consul" -}}
|
||||
[http.services.consul]
|
||||
[http.services.consul.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[http.services.consul.loadBalancer.servers]]
|
||||
# Not using .Port because that's an RPC port
|
||||
url = "http://{{ .Address }}:8500"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{ with service "vault" -}}
|
||||
[http.services.vault]
|
||||
[http.services.vault.loadBalancer]
|
||||
[http.services.vault.loadBalancer.sticky.cookie]
|
||||
{{ range . -}}
|
||||
[[http.services.vault.loadBalancer.servers]]
|
||||
url = "http://{{ .Address }}:{{ .Port }}"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
EOH
|
||||
destination = "local/config/conf/route-hashi.toml"
|
||||
@ -233,7 +191,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with service "syslogng" -}}
|
||||
{{ with nomadService "syslogng" -}}
|
||||
[tcp.routers]
|
||||
[tcp.routers.syslogtcp]
|
||||
entryPoints = ["syslogtcp"]
|
||||
@ -249,7 +207,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
{{ end -}}
|
||||
{{ end }}
|
||||
|
||||
{{ with service "syslogng" -}}
|
||||
{{ with nomadService "syslogng" -}}
|
||||
[udp.routers]
|
||||
[udp.routers.syslogudp]
|
||||
entryPoints = ["syslogudp"]
|
||||
@ -271,9 +229,10 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
template {
|
||||
data = <<EOH
|
||||
[http.middlewares]
|
||||
{{ with secret "kv/data/traefik" }}
|
||||
{{ if .Data.data.usersfile }}
|
||||
{{ with nomadVar "nomad/jobs/traefik" }}
|
||||
{{ if .usersfile }}
|
||||
[http.middlewares.basic-auth.basicAuth]
|
||||
# TODO: Reference secrets mount
|
||||
usersFile = "/etc/traefik/usersfile"
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
@ -284,8 +243,8 @@ CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "kv/data/traefik" }}
|
||||
{{ .Data.data.usersfile }}
|
||||
{{ with nomadVar "nomad/jobs/traefik" }}
|
||||
{{ .usersfile }}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "secrets/usersfile"
|
||||
|
@ -9,7 +9,6 @@ job "adminer" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "adminer" {
|
||||
host_network = "loopback"
|
||||
to = 8080
|
||||
}
|
||||
}
|
||||
@ -18,30 +17,6 @@ job "adminer" {
|
||||
name = "adminer"
|
||||
port = "adminer"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 8080
|
||||
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
local_bind_port = 4040
|
||||
}
|
||||
|
||||
config {
|
||||
protocol = "tcp"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 25
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.adminer.entryPoints=websecure",
|
||||
@ -56,8 +31,14 @@ job "adminer" {
|
||||
ports = ["adminer"]
|
||||
}
|
||||
|
||||
env = {
|
||||
"ADMINER_DEFAULT_SERVER" = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
|
||||
template {
|
||||
data = <<EOF
|
||||
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
|
||||
ADMINER_DEFAULT_SERVER={{ .Address }}:{{ .Port }}
|
||||
{{- end }}
|
||||
EOF
|
||||
env = true
|
||||
destination = "env"
|
||||
}
|
||||
|
||||
resources {
|
||||
|
@ -15,8 +15,8 @@ job "mysql-server" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "db" {
|
||||
host_network = "loopback"
|
||||
to = 3306
|
||||
}
|
||||
}
|
||||
@ -29,22 +29,8 @@ job "mysql-server" {
|
||||
|
||||
service {
|
||||
name = "mysql-server"
|
||||
provider = "nomad"
|
||||
port = "db"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 3306
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task "mysql-server" {
|
||||
@ -56,13 +42,6 @@ job "mysql-server" {
|
||||
args = ["--innodb-buffer-pool-size=1G"]
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "mysql-data"
|
||||
destination = "/var/lib/mysql"
|
||||
@ -76,8 +55,8 @@ job "mysql-server" {
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "kv/data/mysql" }}
|
||||
MYSQL_ROOT_PASSWORD={{ .Data.data.root_password }}
|
||||
{{ with nomadVar "nomad/jobs" }}
|
||||
MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "secrets/db.env"
|
||||
|
@ -16,50 +16,3 @@ resource "nomad_job" "adminer" {
|
||||
|
||||
jobspec = file("${path.module}/adminer.nomad")
|
||||
}
|
||||
|
||||
# NOTE: This may need to be moved to after the services are created
|
||||
resource "consul_config_entry" "mysql_intents" {
|
||||
name = "mysql-server"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "adminer"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "nextcloud"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "backups"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "grafana"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "blocky-api"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "photoprism"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
@ -16,30 +16,15 @@ job "redis" {
|
||||
mode = "bridge"
|
||||
|
||||
port "main" {
|
||||
host_network = "loopback"
|
||||
to = 6379
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "redis"
|
||||
provider = "nomad"
|
||||
port = "main"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 6379
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# check {
|
||||
# name = "alive"
|
||||
# type = "tcp"
|
||||
|
@ -16,31 +16,3 @@ resource "nomad_job" "rediscommander" {
|
||||
|
||||
jobspec = file("${path.module}/rediscommander.nomad")
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "redis_intents" {
|
||||
name = "redis"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "blocky-api"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "rediscommander"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "authelia"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
@ -9,7 +9,6 @@ job "rediscommander" {
|
||||
mode = "bridge"
|
||||
|
||||
port "main" {
|
||||
host_network = "loopback"
|
||||
to = 8081
|
||||
}
|
||||
}
|
||||
@ -18,26 +17,6 @@ job "rediscommander" {
|
||||
name = "rediscommander"
|
||||
port = "main"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 8081
|
||||
|
||||
upstreams {
|
||||
destination_name = "redis"
|
||||
local_bind_port = 6379
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 25
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.rediscommander.entryPoints=websecure",
|
||||
@ -52,8 +31,14 @@ job "rediscommander" {
|
||||
ports = ["main"]
|
||||
}
|
||||
|
||||
env = {
|
||||
"REDIS_HOSTS" = "local:${NOMAD_UPSTREAM_ADDR_redis}"
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
|
||||
REDIS_HOSTS=local:{{ .Address }}:{{ .Port }}
|
||||
{{- end }}
|
||||
EOH
|
||||
env = true
|
||||
destination = "env"
|
||||
}
|
||||
|
||||
resources {
|
||||
|
nomad_vars.py (new executable file)
@@ -0,0 +1,73 @@
+#! /usr/bin/env python3
+from collections import defaultdict
+from os import getenv
+
+import requests
+import yaml
+
+
+NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")
+NOMAD_TOKEN = getenv("NOMAD_TOKEN")
+
+
+def write_var(path: str, items: dict[str, str | float | int]) -> dict:
+    headers = {}
+    if NOMAD_TOKEN:
+        headers["X-Nomad-Token"] = NOMAD_TOKEN
+
+    result = requests.post(
+        f"{NOMAD_ADDR}/v1/var/{path}",
+        headers=headers,
+        json={
+            "Path": path,
+            "Items": {k: str(v) for k, v in items.items()},
+        },
+    )
+
+    print(result.text)
+    result.raise_for_status()
+
+    return result.json()
+
+
+def write_consul():
+    with open("./ansible_playbooks/vars/consul_values.yml") as f:
+        vars = yaml.load(f, yaml.CLoader)["consul_values"]
+
+    key_values = defaultdict(list)
+    for path, value in vars.items():
+        path, _, item = path.rpartition("/")
+        key_values[path].append((item, value))
+
+    for path, items in key_values.items():
+        print("path", path, "items", items)
+        response = write_var(path, dict(items))
+        print(response)
+
+
+def write_vault():
+    with open("./ansible_playbooks/vars/vault_hashi_vault_values.yml") as f:
+        vars = yaml.load(f, yaml.CLoader)["hashi_vault_values"]
+    prefix = "secrets/"
+
+    for path, items in vars.items():
+        print("path", path, "items", items)
+        response = write_var(prefix + path, items)
+        print(response)
+
+
+def write_nomad():
+    with open("./ansible_playbooks/vars/nomad_vars.yml") as f:
+        vars = yaml.load(f, yaml.CLoader)
+
+    for path, items in vars.items():
+        print("path", path, "items", items)
+        response = write_var(path, items)
+        print(response)
+
+
+def main():
+    write_nomad()
+
+
+if __name__ == "__main__":
+    main()
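After a run of nomad_vars.py, the written variables can be read back through the same variables API the script posts to. The sketch below is illustrative only, assuming Nomad 1.4+ and its documented `GET /v1/var/:path` endpoint, with the same NOMAD_ADDR/NOMAD_TOKEN environment variables as the script; the path `nomad/jobs` is one the job templates in this commit read.

```python
# Hedged sketch: read a Nomad variable back to confirm nomad_vars.py wrote it.
# Assumes Nomad 1.4+ (variables API) and the env vars used by the script above.
from os import getenv

import requests

NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = getenv("NOMAD_TOKEN")


def read_var(path: str) -> dict[str, str]:
    headers = {"X-Nomad-Token": NOMAD_TOKEN} if NOMAD_TOKEN else {}
    result = requests.get(f"{NOMAD_ADDR}/v1/var/{path}", headers=headers)
    result.raise_for_status()
    # The response wraps the variable; "Items" holds the key/value pairs.
    return result.json()["Items"]


if __name__ == "__main__":
    # Print keys only, since the values are secrets.
    print(sorted(read_var("nomad/jobs").keys()))
```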
providers.tf
@ -1,45 +1,6 @@
|
||||
# Configure Consul provider
|
||||
provider "consul" {
|
||||
address = var.consul_address
|
||||
}
|
||||
|
||||
# Get Nomad client from Consul
|
||||
data "consul_service" "nomad" {
|
||||
name = "nomad-client"
|
||||
}
|
||||
|
||||
# Get Vault client from Consul
|
||||
data "consul_service" "vault" {
|
||||
name = "vault"
|
||||
tag = "active"
|
||||
}
|
||||
|
||||
locals {
|
||||
# Get Nomad address from Consul
|
||||
nomad_node = data.consul_service.nomad.service[0]
|
||||
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
|
||||
|
||||
# Get Vault address from Consul
|
||||
vault_node = data.consul_service.vault.service[0]
|
||||
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
|
||||
}
|
||||
|
||||
# Configure the Vault provider
|
||||
provider "vault" {
|
||||
address = length(var.vault_address) == 0 ? local.vault_node_address : var.vault_address
|
||||
token = var.vault_token
|
||||
}
|
||||
|
||||
# Something that should exist in a post bootstrap module, right now module includes bootstrapping
|
||||
# which requries Admin
|
||||
# data "vault_nomad_access_token" "deploy" {
|
||||
# backend = "nomad"
|
||||
# role = "deploy"
|
||||
# }
|
||||
|
||||
# Configure the Nomad provider
|
||||
provider "nomad" {
|
||||
address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
|
||||
address = var.nomad_address
|
||||
secret_id = var.nomad_secret_id
|
||||
# secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
|
||||
region = "global"
|
||||
|
services.tf
@@ -1,5 +1,5 @@
-module "services" {
-  source = "./services"
-
-  depends_on = [module.databases, module.core]
-}
+# module "services" {
+#   source = "./services"
+#
+#   depends_on = [module.databases, module.core]
+# }
vars.tf
@ -1,16 +1,6 @@
|
||||
variable "consul_address" {
|
||||
type = string
|
||||
default = "http://n1.thefij:8500"
|
||||
}
|
||||
|
||||
variable "vault_address" {
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "nomad_address" {
|
||||
type = string
|
||||
default = ""
|
||||
default = "http://n1.thefij:4646"
|
||||
}
|
||||
|
||||
variable "base_hostname" {
|
||||
@ -25,9 +15,3 @@ variable "nomad_secret_id" {
|
||||
sensitive = true
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "vault_token" {
|
||||
type = string
|
||||
sensitive = true
|
||||
default = ""
|
||||
}
|
||||
|