WIP: Moving vars and service discovery to Nomad

Starting with core

parent ee68310e58
commit 65cb6afaf9

.terraform.lock.hcl (generated)
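Before the per-file diffs: the recurring change throughout this commit is swapping Consul/Vault template lookups (`service`, `key`/`keyOrDefault`, `secret`) for Nomad-native `nomadService`/`nomadVar` lookups, and registering services with `provider = "nomad"`. As a rough, non-authoritative illustration of where that data lives afterwards, here is a small Python sketch (not part of the commit) that reads a variable and a service registration straight from the Nomad HTTP API; the variable path and service name are hypothetical examples, and a Nomad 1.4+ agent reachable at NOMAD_ADDR is assumed.

```python
#!/usr/bin/env python3
"""Illustrative only: peek at the data nomadVar/nomadService templates resolve against."""
from os import getenv

import requests

NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")
HEADERS = {"X-Nomad-Token": getenv("NOMAD_TOKEN", "")}

# Roughly what `{{ with nomadVar "nomad/jobs/blocky" }}` reads (path is an example).
var_resp = requests.get(f"{NOMAD_ADDR}/v1/var/nomad/jobs/blocky", headers=HEADERS)
if var_resp.ok:
    print("variable items:", var_resp.json().get("Items", {}))

# Roughly what `{{ range nomadService "traefik" }}` iterates over (name is an example).
svc_resp = requests.get(f"{NOMAD_ADDR}/v1/service/traefik", headers=HEADERS)
if svc_resp.ok:
    for reg in svc_resp.json():
        print("service address:", reg["Address"], reg["Port"])
```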
@@ -2,40 +2,40 @@
 # Manual edits may be lost in future updates.

 provider "registry.terraform.io/hashicorp/consul" {
-  version = "2.16.2"
+  version = "2.17.0"
   hashes = [
-    "h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
-    "zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
-    "zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
-    "zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
-    "zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
-    "zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
-    "zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
-    "zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
-    "zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
-    "zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
-    "zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
-    "zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
+    "h1:k+8ptRn/iiCnE7mC0LVA8FvnukzKnlD3KAcquPFbtN8=",
+    "zh:1cca5e144b4696900d2410e26499a00c9666e5777b657e9844a4b6d198164a09",
+    "zh:4fe59329ae4a4fc13751cde4a1044427ca591ecefbaa8dde2ce828f660fbddb1",
+    "zh:55c42cec7dd10ee1f03eca03d5b8e3bcba7bf281bcd250ac220458aba735ba1f",
+    "zh:625a0481d0b2599d0e6ac609d9efc151f1c9cad53091e2ee3bfcedc34ccacb34",
+    "zh:7e9a08b19491f26aa685311a9211bacd7b7027d9cf6eaee16949435221a5f688",
+    "zh:9d92816f609367204c4df20c29c57ee631f5a65cf6bb782d9d9b3f945ba21353",
+    "zh:a332ef65a6ba829dc335ade1a3e69ae14e162dc6ca1a991d9d6ad4e596f4c2d7",
+    "zh:ce7ffac8d852342e9fe25053383613934c8b81d8c2ba2c9d10626b71e329fed7",
+    "zh:d384a1ef35c766362e8ae3131d00c05e1c0904d8b4b1d964548b91e1025f324b",
+    "zh:d85067f345b663e8e59fb02705918d3618ce56887a472665bec7f1aeddbc9ea4",
+    "zh:ddff8512e8181efae6d0d259abcd457d9a394a4a6f99d6bb0b180cabee373097",
     "zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
   ]
 }

 provider "registry.terraform.io/hashicorp/external" {
-  version = "2.2.3"
+  version = "2.3.1"
   hashes = [
-    "h1:uvOYRWcVIqOZSl8YjjaB18yZFz1AWIt2CnK7O45rckg=",
-    "zh:184ecd339d764de845db0e5b8a9c87893dcd0c9d822167f73658f89d80ec31c9",
-    "zh:2661eaca31d17d6bbb18a8f673bbfe3fe1b9b7326e60d0ceb302017003274e3c",
-    "zh:2c0a180f6d1fc2ba6e03f7dfc5f73b617e45408681f75bca75aa82f3796df0e4",
-    "zh:4b92ae44c6baef4c4952c47be00541055cb5280dd3bc8031dba5a1b2ee982387",
-    "zh:5641694d5daf3893d7ea90be03b6fa575211a08814ffe70998d5adb8b59cdc0a",
-    "zh:5bd55a2be8a1c20d732ac9c604b839e1cadc8c49006315dffa4d709b6874df32",
-    "zh:6e0ef5d11e1597202424b7d69b9da7b881494c9b13a3d4026fc47012dc651c79",
+    "h1:bROCw6g5D/3fFnWeJ01L4IrdnJl1ILU8DGDgXCtYzaY=",
+    "zh:001e2886dc81fc98cf17cf34c0d53cb2dae1e869464792576e11b0f34ee92f54",
+    "zh:2eeac58dd75b1abdf91945ac4284c9ccb2bfb17fa9bdb5f5d408148ff553b3ee",
+    "zh:2fc39079ba61411a737df2908942e6970cb67ed2f4fb19090cd44ce2082903dd",
+    "zh:472a71c624952cff7aa98a7b967f6c7bb53153dbd2b8f356ceb286e6743bb4e2",
+    "zh:4cff06d31272aac8bc35e9b7faec42cf4554cbcbae1092eaab6ab7f643c215d9",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:9e19f89fa25004d3b926a8d15ea630b4bde62f1fa4ed5e11a3d27aabddb77353",
-    "zh:b763efdd69fd097616b4a4c89cf333b4cee9699ac6432d73d2756f8335d1213f",
-    "zh:e3b561efdee510b2b445f76a52a902c52bee8e13095e7f4bed7c80f10f8d294a",
-    "zh:fe660bb8781ee043a093b9a20e53069974475dcaa5791a1f45fd03c61a26478a",
+    "zh:7ed16ccd2049fa089616b98c0bd57219f407958f318f3c697843e2397ddf70df",
+    "zh:842696362c92bf2645eb85c739410fd51376be6c488733efae44f4ce688da50e",
+    "zh:8985129f2eccfd7f1841ce06f3bf2bbede6352ec9e9f926fbaa6b1a05313b326",
+    "zh:a5f0602d8ec991a5411ef42f872aa90f6347e93886ce67905c53cfea37278e05",
+    "zh:bf4ab82cbe5256dcef16949973bf6aa1a98c2c73a98d6a44ee7bc40809d002b8",
+    "zh:e70770be62aa70198fa899526d671643ff99eecf265bf1a50e798fc3480bd417",
   ]
 }

@@ -57,22 +57,3 @@ provider "registry.terraform.io/hashicorp/nomad" {
     "zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
   ]
 }
-
-provider "registry.terraform.io/hashicorp/vault" {
-  version = "3.11.0"
-  hashes = [
-    "h1:AUVEra6fAOiAUWa0FOU+ehx4K2htbsfgLDrMh1H6mQs=",
-    "zh:18cb684852f1b40b2a329ba07ece3363430d69bffdcafea48ed29f954481e39e",
-    "zh:1b96968a8de6849a237cc945cbe247ccd6ec98b4023548b1c0af5d6c6affe4ef",
-    "zh:3e0a0741ba12aa0cf1a2b8b80928450bb329343f4b41f35b0eddbeb52aa6284b",
-    "zh:4a8f0ee5ac4e8a0705d9f38b3d549223fe1142486d71f0b6f24f64ae0d7dd5ca",
-    "zh:4cc6705dcd111e6ad47ab4cfd2d8a99b2b241967abd50add6ac8c27025f4128b",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:8e106e840a963b9ae32dc24b50fa1ceecb09753e6db10ab134009d59d170686b",
-    "zh:8f9c4ccf4da8555b11375d2a09a022d7a8f5ecf701f0bb89a4f07ad0b720bb98",
-    "zh:a6fda115017b42f71f4b7917ae4860354920f0653cb8906ce627129dbabb252b",
-    "zh:c01666362b293b6af8cd556b2c5ffe9014ae8640ec3621c1cfa772fa1a6b335d",
-    "zh:e9be58b1211da0219a5bf6bfd81b8bf474256519426df10672e6dfce3086af60",
-    "zh:fd2272083e90b38c28cd18b1b9d3ae14b6a0ebf08985468d010d2bee8df816e0",
-  ]
-}
@@ -13,152 +13,6 @@
         create: true
         line: "nameserver {{ non_nomad_dns }}"

-- name: Build Consul cluster
-  hosts: consul_instances
-  any_errors_fatal: true
-
-  roles:
-    - role: ansible-consul
-      vars:
-        consul_version: "1.13.3-1"
-        consul_install_upgrade: true
-        consul_install_from_repo: true
-        consul_os_repo_prerequisites: []
-
-        consul_node_role: server
-        consul_raft_protocol: 3
-        consul_bootstrap_expect: true
-        consul_bootstrap_expect_max_value: 2
-        consul_bootstrap_expect_value: "{{ [(play_hosts | length), consul_bootstrap_expect_max_value] | min }}"
-
-        consul_user: consul
-        consul_manage_user: true
-        consul_group: bin
-        consul_manage_group: true
-
-        # consul_tls_enable: true
-        consul_connect_enabled: true
-        consul_ports_grpc: 8502
-        consul_client_address: "0.0.0.0"
-
-        # Autopilot
-        consul_autopilot_enable: false
-        consul_autopilot_cleanup_dead_Servers: false
-
-        # Enable metrics
-        consul_config_custom:
-          telemetry:
-            prometheus_retention_time: "2h"
-
-        # DNS forwarding
-        consul_dnsmasq_enable: true
-        consul_dnsmasq_servers:
-          # TODO: use addresses of other nomad nodes?
-          # Maybe this can be [] to get the values from dhcp
-          - 1.1.1.1
-          - 1.0.0.1
-        consul_dnsmasq_bind_interfaces: true
-        consul_dnsmasq_listen_addresses:
-          # Listen only to loopback interface
-          - 127.0.0.1
-      become: true
-
-  tasks:
-    - name: Start Consul
-      systemd:
-        state: started
-        name: consul
-      become: true
-
-    # If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something
-    # pointing to 127.0.0.1 and possibly restart Docker and Nomad
-    # Actually, we should point to our external Nomad address so that Docker uses it
-    - name: Update resolv.conf
-      lineinfile:
-        dest: /etc/resolv.conf
-        create: true
-        line: "nameserver {{ hostvars[item].ansible_default_ipv4.address }}"
-      loop: "{{ ansible_play_hosts }}"
-      become: true
-
-- name: Setup Vault cluster
-  hosts: vault_instances
-
-  roles:
-    - name: ansible-vault
-      vars:
-        vault_version: 1.12.2-1
-        vault_install_hashi_repo: true
-        vault_harden_file_perms: true
-        # Maybe this should be restricted
-        vault_group: bin
-        vault_bin_path: /usr/bin
-        vault_address: 0.0.0.0
-
-        vault_backend: consul
-      become: true
-
-  tasks:
-    - name: Get Vault status
-      uri:
-        url: http://127.0.0.1:8200/v1/sys/health
-        method: GET
-        status_code: 200, 429, 472, 473, 501, 503
-        body_format: json
-        return_content: true
-      register: vault_status
-
-    - name: Initialize Vault
-      when: not vault_status.json["initialized"]
-      block:
-        - name: Initialize Vault
-          command:
-            argv:
-              - "vault"
-              - "operator"
-              - "init"
-              - "-format=json"
-              - "-address=http://127.0.0.1:8200/"
-              - "-key-shares={{ vault_init_key_shares|default(3) }}"
-              - "-key-threshold={{ vault_init_key_threshold|default(2) }}"
-          run_once: true
-          register: vault_init
-
-        - name: Save initialize result
-          copy:
-            content: "{{ vault_init.stdout }}"
-            dest: "../vault-keys.json"
-          when: vault_init is succeeded
-          delegate_to: localhost
-          run_once: true
-
-        - name: Unseal from init
-          no_log: true
-          command:
-            argv:
-              - "vault"
-              - "operator"
-              - "unseal"
-              - "-address=http://127.0.0.1:8200/"
-              - "{{ item }}"
-          loop: "{{ (vault_init.stdout | from_json)['unseal_keys_hex'] }}"
-          when: vault_init is succeeded
-
-    - name: Unseal Vault
-      no_log: true
-      command:
-        argv:
-          - "vault"
-          - "operator"
-          - "unseal"
-          - "-address=http://127.0.0.1:8200/"
-          - "{{ item }}"
-      loop: "{{ unseal_keys_hex }}"
-      when:
-        - unseal_keys_hex is defined
-        - vault_status.json["sealed"]
-
 - name: Install Docker
   hosts: nomad_instances
   become: true

@@ -309,8 +163,8 @@
           enabled: true
           selinuxlabel: "z"
           # Send logs to journald so we can scrape them for Loki
-          logging:
-            type: journald
+          # logging:
+          #   type: journald
           extra_labels:
             - "job_name"
             - "job_id"

@@ -341,35 +195,9 @@
         # Enable ACLs
         nomad_acl_enabled: true

-        # Enable vault integration
-        # HACK: Only talk to local Vault for now because it doesn't have HTTPS
-        # TODO: Would be really great to have this over https and point to vault.consul.service
-        # nomad_vault_address: "https://vault.service.consul:8200"
-        # Right now, each node only talks to it's local Vault, so if that node is rebooted and
-        # that vault is sealed, it will not have access to vault. This is a problem if a node
-        # must reboot.
-        nomad_vault_address: "http://127.0.0.1:8200"
-        # TODO: This fails on first run because the Nomad-Vault integration can't be set up
-        # until Nomad has started. Could maybe figure out if ACLs have been set up and leave
-        # these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
-        # befor Nomad is set up
-        nomad_vault_create_from_role: "nomad-cluster"
-        # TODO: (security) Probably want to restict this to a narrower scoped token
-        nomad_vault_enabled: "{{ root_token is defined }}"
-        nomad_vault_token: "{{ root_token | default('') }}"
-
         nomad_config_custom:
           ui:
             enabled: true
-            consul:
-              ui_url: "https://consul.thefij.rocks/ui"
-            vault:
-              ui_url: "https://vault.thefij.rocks/ui"
-          consul:
-            tags:
-              - "traefik.enable=true"
-              - "traefik.consulcatalog.connect=true"
-              - "traefik.http.routers.nomadclient.entrypoints=websecure"

 - name: Bootstrap Nomad ACLs and scheduler
   hosts: nomad_instances

@@ -424,20 +252,6 @@
       changed_when: false
       register: read_secretid

-    - name: Enable service scheduler preemption
-      command:
-        argv:
-          - nomad
-          - operator
-          - scheduler
-          - set-config
-          - -preempt-system-scheduler=true
-          - -preempt-service-scheduler=true
-      environment:
-        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
-      delegate_to: "{{ play_hosts[0] }}"
-      run_once: true
-
     - name: Look for policy
       command:
         argv:

@@ -454,8 +268,6 @@
       copy:
        src: ../acls/nomad-anon-policy.hcl
        dest: /tmp/anonymous.policy.hcl
-      delegate_to: "{{ play_hosts[0] }}"
-      register: anon_policy
       run_once: true

     - name: Create anon-policy

@@ -474,18 +286,32 @@
       delegate_to: "{{ play_hosts[0] }}"
       run_once: true

-    - name: Set up Nomad backend and roles in Vault
-      community.general.terraform:
-        project_path: ../acls
-        force_init: true
-        variables:
-          consul_address: "{{ play_hosts[0] }}:8500"
-          vault_token: "{{ root_token }}"
-          nomad_secret_id: "{{ read_secretid.stdout }}"
-      delegate_to: localhost
+    - name: Enable service scheduler preemption
+      command:
+        argv:
+          - nomad
+          - operator
+          - scheduler
+          - set-config
+          - -preempt-system-scheduler=true
+          - -preempt-service-scheduler=true
+      environment:
+        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
+      delegate_to: "{{ play_hosts[0] }}"
       run_once: true
-      notify:
-        - Restart Nomad
+
+    # - name: Set up Nomad backend and roles in Vault
+    #   community.general.terraform:
+    #     project_path: ../acls
+    #     force_init: true
+    #     variables:
+    #       consul_address: "{{ play_hosts[0] }}:8500"
+    #       vault_token: "{{ root_token }}"
+    #       nomad_secret_id: "{{ read_secretid.stdout }}"
+    #   delegate_to: localhost
+    #   run_once: true
+    #   notify:
+    #     - Restart Nomad

   handlers:
     - name: Restart Nomad
@@ -37,11 +37,13 @@ job "blocky" {

     service {
       name = "blocky-dns"
+      provider = "nomad"
       port = "dns"
     }

     service {
       name = "blocky-api"
+      provider = "nomad"
       port = "api"

       meta {

@@ -53,41 +55,6 @@ job "blocky" {
         "traefik.http.routers.blocky-api.entryPoints=websecure",
       ]

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 4000
-
-            expose {
-              path {
-                path = "/metrics"
-                protocol = "http"
-                local_path_port = 4000
-                listener_port = "api"
-              }
-            }
-
-            upstreams {
-              destination_name = "redis"
-              local_bind_port = 6379
-            }
-
-            upstreams {
-              destination_name = "mysql-server"
-              local_bind_port = 4040
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 20
-            memory_max = 50
-          }
-        }
-      }
-
       check {
         name = "api-health"
         port = "api"

@@ -118,13 +85,6 @@ job "blocky" {
        memory_max = 100
      }

-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       template {
         data = var.config_data
         destination = "app/config.yml"

@@ -154,38 +114,33 @@ job "blocky" {
       ]
     }

-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       template {
         data = <<EOF
 [client]
-host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
-port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
+host={{ .Address }}
+port={{ .Port }}
+{{ end -}}
 user=root
-{{ with service "vault" -}}{{ with secret "kv/data/mysql" }}
-password={{ .Data.data.root_password }}
-{{ end -}}{{ end -}}
+{{ with nomadVar "nomad/jobs" }}
+password={{ .mysql_root_password }}
+{{ end }}
 EOF
         destination = "$${NOMAD_SECRETS_DIR}/my.cnf"
       }

       template {
         data = <<EOF
-{{ with service "vault" -}}{{ with secret "kv/data/blocky" -}}
-{{ if .Data.data.db_name -}}
-{{ $db_name := .Data.data.db_name }}
+{{ with nomadVar "nomad/jobs/blocky" -}}
+{{ if .db_name -}}
+{{ $db_name := .db_name }}
 CREATE DATABASE IF NOT EXISTS `{{ $db_name }}`;
-CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
-GRANT ALL ON `{{ $db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
-{{ with secret "kv/data/grafana" -}}
+CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}';
+GRANT ALL ON `{{ $db_name }}`.* to '{{ .db_user }}'@'%';
+{{ with nomadVar "nomad/jobs" -}}
 -- Add grafana read_only user
-CREATE USER IF NOT EXISTS '{{ .Data.data.db_user_ro }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass_ro }}';
-GRANT SELECT ON `{{ $db_name }}`.* to '{{ .Data.data.db_user_ro }}'@'%';
+CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}';
+GRANT SELECT ON `{{ $db_name }}`.* to '{{ .db_user_ro }}'@'%';
 {{ end -}}
 {{ else -}}
 SELECT 'NOOP';
@@ -25,7 +25,8 @@ upstream:
 conditional:
   fallbackUpstream: false
   mapping:
-    consul: {{ env "attr.unique.network.ip-address" }}:8600
+    # TODO: Run a simple dns server that this can forward to where it's hosts are set by nomad-services
+    # consul: {{ env "attr.unique.network.ip-address" }}:8600
     home.arpa: 192.168.2.1
     in-addr.arpa: 192.168.2.1
     iot: 192.168.2.1

@@ -60,8 +61,9 @@ blocking:
   whiteLists:
     # Move to Gitea when deployed internally
     ads:
-      {{ keyOrDefault "blocky/whitelists/ads" "# None" | indent 6 }}
+      {{ with nomadVar "nomad/jobs/blocky" -}}
+      {{ .whitelists_ads | indent 6 }}
+      {{- end }}
   clientGroupsBlock:
     default:
       - ads

@@ -75,37 +77,39 @@ blocking:
 customDNS:
   customTTL: 1h
   mapping:
-    {{ with service "traefik" -}}
+    {{ with nomadService "traefik" -}}
     {{- $last := len . | subtract 1 -}}
     {{- $services := . -}}
-    {{ keyOrDefault "global/base_hostname" "${base_hostname}" }}: {{ range $i := loop $last -}}
+    {{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}: {{ range $i := loop $last -}}
     {{- with index $services $i }}{{ .Address }},{{ end -}}
     {{- end -}}
     {{- with index . $last }}{{ .Address }}{{ end -}}
     {{- end }}
     # Other mappings
-    {{ keyOrDefault "blocky/mappings" "# None" | indent 4 }}
+    {{ with nomadVar "nomad/jobs/blocky" }}{{ .mappings | indent 4 }}{{ end }}

 prometheus:
   enable: true

-{{ with service "redis" -}}
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
 redis:
-  address: {{ env "NOMAD_UPSTREAM_ADDR_redis" }}
+  address: {{ .Address }}:{{ .Port }}
   # password: ""
   # database: 0
   connectionAttempts: 10
   connectionCooldown: 3s
 {{ end -}}

-{{ with service "vault" -}}{{ with service "mysql-server" -}}
-{{ with secret "kv/data/blocky" -}}
+{{ $mysql_addr := "" }}
+{{ with nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}{{ range . -}}
+{{ $mysql_addr = print .Address ":" .Port }}
+{{- end }}{{- end }}
+{{ with nomadVar "nomad/jobs/blocky" -}}
 queryLog:
   type: mysql
-  target: {{ .Data.data.db_user }}:{{ .Data.data.db_pass }}@tcp({{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }})/{{ .Data.data.db_name }}?charset=utf8mb4&parseTime=True&loc=Local
+  target: {{ .db_user }}:{{ .db_pass }}@tcp({{ $mysql_addr }})/{{ .db_name }}?charset=utf8mb4&parseTime=True&loc=Local
   logRetentionDays: 14
 {{ end -}}
-{{ end -}}{{ end -}}

 port: 53
 httpPort: 4000
@@ -17,29 +17,22 @@ job "ddclient" {
       }
     }

-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       template {
         data = <<EOH
+{{ with nomadVar "nomad/jobs/ddclient" -}}
 daemon=900
 ssl=yes
 use=web
 web=api.myip.com

 protocol=cloudflare,
-zone={{ key "ddclient/zone" }},
+zone={{ .zone }},
 ttl=1,
-{{ with secret "kv/data/cloudflare" -}}
 login=token,
-password={{ .Data.data.domain_ddclient }}
-{{ end -}}
+password={{ .cloudflare_domain_key }}

-{{ key "ddclient/domain" }}
+{{ .domain }}
+{{- end }}
 EOH
         destination = "secrets/ddclient.conf"
         change_mode = "restart"
@@ -8,12 +8,10 @@ job "lldap" {
       mode = "bridge"

       port "web" {
-        host_network = "loopback"
         to = 17170
       }

       port "ldap" {
-        host_network = "loopback"
         to = 3890
       }
     }

@@ -26,47 +24,15 @@ job "lldap" {

     service {
       name = "lldap"
+      provider = "nomad"
       port = "ldap"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 3890
-
-            config {
-              protocol = "tcp"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 20
-          }
-        }
-      }
     }

     service {
       name = "ldap-admin"
+      provider = "nomad"
       port = "web"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 17170
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 20
-            memory = 20
-          }
-        }
-      }
-
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.ldap-admin.entryPoints=websecure",

@@ -94,13 +60,6 @@ job "lldap" {
        }
      }

-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       env = {
         "LLDAP_LDAP_PORT" = "${NOMAD_PORT_ldap}"
         "LLDAP_HTTP_PORT" = "${NOMAD_PORT_web}"

@@ -110,25 +69,25 @@ job "lldap" {
         data = <<EOH
 database_url = "sqlite:///data/users.db?mode=rwc"
 key_file = "/data/private_key"
-ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
-{{ with secret "kv/data/lldap" -}}
-jwt_secret = "{{ .Data.data.jwt_secret }}"
-ldap_user_dn = "{{ .Data.data.admin_user }}"
-ldap_user_email = "{{ .Data.data.admin_email }}"
-ldap_user_pass = "{{ .Data.data.admin_password }}"
+ldap_base_dn = "{{ with nomadVar "nomad/jobs" }}{{ .base_dn }}{{ end }}"
+{{ with nomadVar "nomad/jobs/lldap" }}
+jwt_secret = "{{ .jwt_secret }}"
+ldap_user_dn = "{{ .admin_user }}"
+ldap_user_email = "{{ .admin_email }}"
+ldap_user_pass = "{{ .admin_password }}"
 {{ end -}}
-{{ with secret "kv/data/smtp" -}}
+{{ with nomadVar "nomad/jobs" -}}
 [smtp_options]
 enable_password_reset = true
-server = "{{ .Data.data.server }}"
-port = {{ .Data.data.port }}
-tls_required = {{ .Data.data.tls }}
-user = "{{ .Data.data.user }}"
-password = "{{ .Data.data.password }}"
-{{ with secret "kv/data/lldap" -}}
-from = "{{ .Data.data.smtp_from }}"
-reply_to = "{{ .Data.data.smtp_reply_to }}"
+server = "{{ .smtp_server }}"
+port = {{ .smtp_port }}
+tls_required = {{ .smtp_tls }}
+user = "{{ .smtp_user }}"
+password = "{{ .smtp_password }}"
 {{ end -}}
+{{ with nomadVar "nomad/jobs/lldap" -}}
+from = "{{ .smtp_from }}"
+reply_to = "{{ .smtp_reply_to }}"
 {{ end -}}
 EOH
         destination = "secrets/lldap_config.toml"
core/main.tf
77
core/main.tf
@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
module "blocky" {
|
module "blocky" {
|
||||||
source = "./blocky"
|
source = "./blocky"
|
||||||
|
|
||||||
@ -30,11 +29,11 @@ module "nomad_login" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module "metrics" {
|
# module "metrics" {
|
||||||
source = "./metrics"
|
# source = "./metrics"
|
||||||
# Not in this module
|
# # Not in this module
|
||||||
# depends_on = [module.databases]
|
# # depends_on = [module.databases]
|
||||||
}
|
# }
|
||||||
|
|
||||||
module "loki" {
|
module "loki" {
|
||||||
source = "IamTheFij/levant/nomad"
|
source = "IamTheFij/levant/nomad"
|
||||||
@ -57,34 +56,6 @@ module "loki" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "consul_config_entry" "loki_intent" {
|
|
||||||
name = "loki"
|
|
||||||
kind = "service-intentions"
|
|
||||||
|
|
||||||
config_json = jsonencode({
|
|
||||||
Sources = [
|
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "grafana"
|
|
||||||
Precedence = 9
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "promtail"
|
|
||||||
Precedence = 9
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "syslogng-promtail"
|
|
||||||
Precedence = 9
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "nomad_job" "syslog-ng" {
|
resource "nomad_job" "syslog-ng" {
|
||||||
jobspec = file("${path.module}/syslogng.nomad")
|
jobspec = file("${path.module}/syslogng.nomad")
|
||||||
}
|
}
|
||||||
@ -96,41 +67,3 @@ resource "nomad_job" "ddclient" {
|
|||||||
resource "nomad_job" "lldap" {
|
resource "nomad_job" "lldap" {
|
||||||
jobspec = file("${path.module}/lldap.nomad")
|
jobspec = file("${path.module}/lldap.nomad")
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "consul_config_entry" "syslogng_promtail_intent" {
|
|
||||||
name = "syslogng-promtail"
|
|
||||||
kind = "service-intentions"
|
|
||||||
|
|
||||||
config_json = jsonencode({
|
|
||||||
Sources = [
|
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "syslogng"
|
|
||||||
Precedence = 9
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "consul_config_entry" "global_access" {
|
|
||||||
name = "*"
|
|
||||||
kind = "service-intentions"
|
|
||||||
|
|
||||||
config_json = jsonencode({
|
|
||||||
Sources = [
|
|
||||||
{
|
|
||||||
Action = "allow"
|
|
||||||
Name = "traefik"
|
|
||||||
Precedence = 6
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Action = "deny"
|
|
||||||
Name = "*"
|
|
||||||
Precedence = 5
|
|
||||||
Type = "consul"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
@@ -19,28 +19,8 @@ job "syslogng" {

     service {
       name = "syslogng-promtail"
+      provider = "nomad"
       port = "main"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 1514
-
-            upstreams {
-              destination_name = "loki"
-              local_bind_port = 1000
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 20
-            memory_max = 50
-          }
-        }
-      }
     }

     task "promtail" {

@@ -72,7 +52,9 @@ server:
   http_listen_port: 9080

 clients:
-  - url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
+  {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
+  - url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
+  {{- end }}

 scrape_configs:
   # TCP syslog receiver

@@ -161,8 +143,9 @@ source s_internal {
 };

 destination d_loki {
-  # Forward to Connect proxy to Promtail
-  syslog("{{ env "NOMAD_UPSTREAM_IP_syslogng-promtail" }}" transport("tcp") port({{ env "NOMAD_UPSTREAM_PORT_syslogng-promtail" }}));
+  {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "syslogng-promtail" -}}
+  syslog("{{ .Address }}" transport("tcp") port({{ .Port }}));
+  {{- end }}
 };

 log { source(s_internal); destination(d_loki); };
@@ -49,6 +49,7 @@ job "traefik" {

     service {
       name = "traefik"
+      provider = "nomad"
       port = "web"

       check {

@@ -59,10 +60,6 @@ job "traefik" {
         timeout = "2s"
       }

-      connect {
-        native = true
-      }
-
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.traefik.entryPoints=websecure",

@@ -98,10 +95,6 @@ job "traefik" {
        }
      }

-      vault {
-        policies = ["access-tables", "nomad-task"]
-      }
-
       template {
         # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
         left_delimiter = "<<"

@@ -122,11 +115,9 @@ job "traefik" {
   [entryPoints.websecure]
     address = ":443"
     [entryPoints.websecure.http.tls]
-      << if keyExists "traefik/acme/email" ->>
       certResolver = "letsEncrypt"
       [[entryPoints.websecure.http.tls.domains]]
-        main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
-      << end ->>
+        main = "*.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>"

   [entryPoints.metrics]
     address = ":8989"

@@ -152,40 +143,31 @@ job "traefik" {
     directory = "/etc/traefik/conf"
     watch = true

-  [providers.consulCatalog]
-    connectAware = true
-    connectByDefault = true
-    exposedByDefault = false
-    defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
-    [providers.consulCatalog.endpoint]
-      address = "http://<< env "CONSUL_HTTP_ADDR" >>"
-
   [providers.nomad]
     exposedByDefault = false
-    defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
+    defaultRule = "Host(`{{normalize .Name}}.<< with nomadVar "nomad/jobs" >><< .base_hostname >><< end >>`)"
     [providers.nomad.endpoint]
-      address = "http://127.0.0.1:4646/"
+      address = "http://<< env "attr.unique.network.ip-address" >>:4646"

-<< if keyExists "traefik/acme/email" ->>
+<< if nomadVarExists "nomad/jobs/traefik" ->>
 [certificatesResolvers.letsEncrypt.acme]
-  email = "<< key "traefik/acme/email" >>"
+  email = "<< with nomadVar "nomad/jobs/traefik" >><< .acme_email >><< end >>"
   # Store in /local because /secrets doesn't persist with ephemeral disk
   storage = "/local/acme.json"
   [certificatesResolvers.letsEncrypt.acme.dnsChallenge]
     provider = "cloudflare"
     resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
     delayBeforeCheck = 0
-<< end ->>
 EOH
         destination = "local/config/traefik.toml"
       }

       template {
         data = <<EOH
-{{ with secret "kv/data/cloudflare" }}
-CF_DNS_API_TOKEN={{ .Data.data.domain_lego_dns }}
-CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}
-{{ end }}
+{{ with nomadVar "nomad/jobs/traefik" -}}
+CF_DNS_API_TOKEN={{ .cloudflare_api_token_dns_edit }}
+CF_ZONE_API_TOKEN={{ .cloudflare_api_token_zone_read }}
+{{- end }}
 EOH
         destination = "secrets/cloudflare.env"
         env = true

@@ -197,46 +179,17 @@ CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}
 [http.routers]
   [http.routers.nomad]
     entryPoints = ["websecure"]
-    # middlewares = []
     service = "nomad"
-    rule = "Host(`nomad.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
-  [http.routers.consul]
-    entryPoints = ["websecure"]
-    # middlewares = []
-    service = "consul"
-    rule = "Host(`consul.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
-  [http.routers.vault]
-    entryPoints = ["websecure"]
-    # middlewares = []
-    service = "vault"
-    rule = "Host(`vault.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
+    rule = "Host(`nomad.{{ with nomadVar "nomad/jobs" }}{{ .base_hostname }}{{ end }}`)"

 [http.services]
-{{ with service "nomad-client" -}}
+{{ with nomadService "nomad-client" -}}
   [http.services.nomad]
     [http.services.nomad.loadBalancer]
 {{ range . -}}
       [[http.services.nomad.loadBalancer.servers]]
         url = "http://{{ .Address }}:{{ .Port }}"
 {{ end }}
-{{- end }}
-{{ with service "consul" -}}
-  [http.services.consul]
-    [http.services.consul.loadBalancer]
-{{ range . -}}
-      [[http.services.consul.loadBalancer.servers]]
-        # Not using .Port because that's an RPC port
-        url = "http://{{ .Address }}:8500"
-{{ end }}
-{{- end }}
-{{ with service "vault" -}}
-  [http.services.vault]
-    [http.services.vault.loadBalancer]
-      [http.services.vault.loadBalancer.sticky.cookie]
-{{ range . -}}
-      [[http.services.vault.loadBalancer.servers]]
-        url = "http://{{ .Address }}:{{ .Port }}"
-{{ end }}
 {{- end }}
 EOH
         destination = "local/config/conf/route-hashi.toml"

@@ -245,7 +198,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}

       template {
         data = <<EOH
-{{ with service "syslogng" -}}
+{{ with nomadService "syslogng" -}}
 [tcp.routers]
   [tcp.routers.syslogtcp]
     entryPoints = ["syslogtcp"]

@@ -261,7 +214,7 @@ CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}
 {{ end -}}
 {{ end }}

-{{ with service "syslogng" -}}
+{{ with nomadService "syslogng" -}}
 [udp.routers]
   [udp.routers.syslogudp]
     entryPoints = ["syslogudp"]

@@ -283,9 +236,10 @@ CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}
       template {
         data = <<EOH
 [http.middlewares]
-{{ with secret "kv/data/traefik" }}
-{{ if .Data.data.usersfile }}
+{{ with nomadVar "nomad/jobs/traefik" }}
+{{ if .usersfile }}
   [http.middlewares.basic-auth.basicAuth]
+    # TODO: Reference secrets mount
     usersFile = "/etc/traefik/usersfile"
 {{ end }}
 {{ end }}

@@ -296,8 +250,8 @@ CF_ZONE_API_TOKEN={{ .Data.data.domain_lego_dns }}

       template {
         data = <<EOH
-{{ with secret "kv/data/traefik" }}
-{{ .Data.data.usersfile }}
+{{ with nomadVar "nomad/jobs/traefik" }}
+{{ .usersfile }}
 {{ end }}
 EOH
         destination = "secrets/usersfile"
@@ -9,7 +9,6 @@ job "adminer" {
     network {
       mode = "bridge"
       port "adminer" {
-        host_network = "loopback"
         to = 8080
       }
     }

@@ -18,30 +17,6 @@ job "adminer" {
       name = "adminer"
       port = "adminer"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 8080
-
-            upstreams {
-              destination_name = "mysql-server"
-              local_bind_port = 4040
-            }
-
-            config {
-              protocol = "tcp"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 25
-          }
-        }
-      }
-
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.adminer.entryPoints=websecure",

@@ -56,8 +31,14 @@ job "adminer" {
         ports = ["adminer"]
       }

-      env = {
-        "ADMINER_DEFAULT_SERVER" = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
+      template {
+        data = <<EOF
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-server" -}}
+ADMINER_DEFAULT_SERVER={{ .Address }}:{{ .Port }}
+{{- end }}
+EOF
+        env = true
+        destination = "env"
       }

       resources {
@@ -15,8 +15,8 @@ job "mysql-server" {

     network {
       mode = "bridge"

       port "db" {
-        host_network = "loopback"
         to = 3306
       }
       port "envoy_metrics" {

@@ -32,30 +32,8 @@ job "mysql-server" {

     service {
       name = "mysql-server"
+      provider = "nomad"
       port = "db"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 3306
-
-            config {
-              envoy_prometheus_bind_addr = "0.0.0.0:9123"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 50
-          }
-        }
-      }
-
-      meta {
-        envoy_metrics_addr = "${NOMAD_ADDR_envoy_metrics}"
-      }
     }

     task "mysql-server" {

@@ -67,13 +45,6 @@ job "mysql-server" {
         args = ["--innodb-buffer-pool-size=1G"]
       }

-      vault {
-        policies = [
-          "access-tables",
-          "nomad-task",
-        ]
-      }
-
       volume_mount {
         volume = "mysql-data"
         destination = "/var/lib/mysql"

@@ -87,8 +58,8 @@ job "mysql-server" {

       template {
         data = <<EOH
-{{ with secret "kv/data/mysql" }}
-MYSQL_ROOT_PASSWORD={{ .Data.data.root_password }}
+{{ with nomadVar "nomad/jobs" }}
+MYSQL_ROOT_PASSWORD={{ .mysql_root_password }}
 {{ end }}
 EOH
         destination = "secrets/db.env"
@@ -16,50 +16,3 @@ resource "nomad_job" "adminer" {

   jobspec = file("${path.module}/adminer.nomad")
 }
-
-# NOTE: This may need to be moved to after the services are created
-resource "consul_config_entry" "mysql_intents" {
-  name = "mysql-server"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action     = "allow"
-        Name       = "adminer"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "nextcloud"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "backups"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "grafana"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "blocky-api"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "photoprism"
-        Precedence = 9
-        Type       = "consul"
-      },
-    ]
-  })
-}
@@ -16,7 +16,6 @@ job "redis" {
       mode = "bridge"

       port "main" {
-        host_network = "loopback"
         to = 6379
       }
       port "envoy_metrics" {

@@ -26,27 +25,9 @@ job "redis" {

     service {
       name = "redis"
+      provider = "nomad"
       port = "main"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 6379
-
-            config {
-              envoy_prometheus_bind_addr = "0.0.0.0:9123"
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 50
-          }
-        }
-      }
-
       # check {
       #   name = "alive"
       #   type = "tcp"
@@ -16,31 +16,3 @@ resource "nomad_job" "rediscommander" {

   jobspec = file("${path.module}/rediscommander.nomad")
 }
-
-resource "consul_config_entry" "redis_intents" {
-  name = "redis"
-  kind = "service-intentions"
-
-  config_json = jsonencode({
-    Sources = [
-      {
-        Action     = "allow"
-        Name       = "blocky-api"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "rediscommander"
-        Precedence = 9
-        Type       = "consul"
-      },
-      {
-        Action     = "allow"
-        Name       = "authelia"
-        Precedence = 9
-        Type       = "consul"
-      },
-    ]
-  })
-}
@@ -9,7 +9,6 @@ job "rediscommander" {
       mode = "bridge"

       port "main" {
-        host_network = "loopback"
         to = 8081
       }
     }

@@ -18,26 +17,6 @@ job "rediscommander" {
       name = "rediscommander"
       port = "main"

-      connect {
-        sidecar_service {
-          proxy {
-            local_service_port = 8081
-
-            upstreams {
-              destination_name = "redis"
-              local_bind_port = 6379
-            }
-          }
-        }
-
-        sidecar_task {
-          resources {
-            cpu = 50
-            memory = 25
-          }
-        }
-      }
-
       tags = [
         "traefik.enable=true",
         "traefik.http.routers.rediscommander.entryPoints=websecure",

@@ -52,8 +31,14 @@ job "rediscommander" {
         ports = ["main"]
       }

-      env = {
-        "REDIS_HOSTS" = "local:${NOMAD_UPSTREAM_ADDR_redis}"
+      template {
+        data = <<EOH
+{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "redis" -}}
+REDIS_HOSTS=local:{{ .Address }}:{{ .Port }}
+{{- end }}
+EOH
+        env = true
+        destination = "env"
       }

       resources {
nomad_vars.py (new executable file)

@@ -0,0 +1,73 @@
#! /usr/bin/env python3
from collections import defaultdict
from os import getenv

import requests
import yaml


NOMAD_ADDR = getenv("NOMAD_ADDR", "http://127.0.0.1:4646")
NOMAD_TOKEN = getenv("NOMAD_TOKEN")


def write_var(path: str, items: dict[str, str | float | int]) -> dict:
    headers = {}
    if NOMAD_TOKEN:
        headers["X-Nomad-Token"] = NOMAD_TOKEN

    result = requests.post(
        f"{NOMAD_ADDR}/v1/var/{path}",
        headers=headers,
        json={
            "Path": path,
            "Items": {k: str(v) for k, v in items.items()},
        },
    )

    print(result.text)
    result.raise_for_status()

    return result.json()


def write_consul():
    with open("./ansible_playbooks/vars/consul_values.yml") as f:
        vars = yaml.load(f, yaml.CLoader)["consul_values"]

    key_values = defaultdict(list)
    for path, value in vars.items():
        path, _, item = path.rpartition("/")
        key_values[path].append((item, value))

    for path, items in key_values.items():
        print("path", path, "items", items)
        response = write_var(path, dict(items))
        print(response)


def write_vault():
    with open("./ansible_playbooks/vars/vault_hashi_vault_values.yml") as f:
        vars = yaml.load(f, yaml.CLoader)["hashi_vault_values"]
    prefix = "secrets/"

    for path, items in vars.items():
        print("path", path, "items", items)
        response = write_var(prefix + path, items)
        print(response)


def write_nomad():
    with open("./ansible_playbooks/vars/nomad_vars.yml") as f:
        vars = yaml.load(f, yaml.CLoader)

    for path, items in vars.items():
        print("path", path, "items", items)
        response = write_var(path, items)
        print(response)


def main():
    write_nomad()


if __name__ == "__main__":
    main()
providers.tf
41
providers.tf
@ -1,45 +1,6 @@
|
|||||||
# Configure Consul provider
|
|
||||||
provider "consul" {
|
|
||||||
address = var.consul_address
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get Nomad client from Consul
|
|
||||||
data "consul_service" "nomad" {
|
|
||||||
name = "nomad-client"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get Vault client from Consul
|
|
||||||
data "consul_service" "vault" {
|
|
||||||
name = "vault"
|
|
||||||
tag = "active"
|
|
||||||
}
|
|
||||||
|
|
||||||
locals {
|
|
||||||
# Get Nomad address from Consul
|
|
||||||
nomad_node = data.consul_service.nomad.service[0]
|
|
||||||
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
|
|
||||||
|
|
||||||
# Get Vault address from Consul
|
|
||||||
vault_node = data.consul_service.vault.service[0]
|
|
||||||
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Configure the Vault provider
|
|
||||||
provider "vault" {
|
|
||||||
address = length(var.vault_address) == 0 ? local.vault_node_address : var.vault_address
|
|
||||||
token = var.vault_token
|
|
||||||
}
|
|
||||||
|
|
||||||
# Something that should exist in a post bootstrap module, right now module includes bootstrapping
|
|
||||||
# which requries Admin
|
|
||||||
# data "vault_nomad_access_token" "deploy" {
|
|
||||||
# backend = "nomad"
|
|
||||||
# role = "deploy"
|
|
||||||
# }
|
|
||||||
|
|
||||||
# Configure the Nomad provider
|
# Configure the Nomad provider
|
||||||
provider "nomad" {
|
provider "nomad" {
|
||||||
address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
|
address = var.nomad_address
|
||||||
secret_id = var.nomad_secret_id
|
secret_id = var.nomad_secret_id
|
||||||
# secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
|
# secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
|
||||||
region = "global"
|
region = "global"
|
||||||
|
services.tf

@@ -1,5 +1,5 @@
-module "services" {
-  source = "./services"
-
-  depends_on = [module.databases, module.core]
-}
+# module "services" {
+#   source = "./services"
+#
+#   depends_on = [module.databases, module.core]
+# }
vars.tf
18
vars.tf
@ -1,16 +1,6 @@
|
|||||||
variable "consul_address" {
|
|
||||||
type = string
|
|
||||||
default = "http://n1.thefij:8500"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "vault_address" {
|
|
||||||
type = string
|
|
||||||
default = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "nomad_address" {
|
variable "nomad_address" {
|
||||||
type = string
|
type = string
|
||||||
default = ""
|
default = "http://n1.thefij:4646"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "base_hostname" {
|
variable "base_hostname" {
|
||||||
@ -25,9 +15,3 @@ variable "nomad_secret_id" {
|
|||||||
sensitive = true
|
sensitive = true
|
||||||
default = ""
|
default = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "vault_token" {
|
|
||||||
type = string
|
|
||||||
sensitive = true
|
|
||||||
default = ""
|
|
||||||
}
|
|
||||||
|