Compare commits: main...vault-oidc (1 commit)

Author | SHA1 | Date
---|---|---
 | 40b0776ce9 |
@@ -115,10 +115,10 @@
}
],
"results": {
"nomad/core/metrics/grafana/grafana.ini": [
"nomad/metrics/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78,
@@ -126,7 +126,7 @@
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109,
@@ -134,7 +134,7 @@
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151,
@@ -142,7 +142,7 @@
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154,
@@ -150,7 +150,7 @@
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239,
@@ -158,28 +158,28 @@
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"filename": "nomad/metrics/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252,
"is_secret": false
}
],
"nomad/core/syslogng.nomad": [
"nomad/syslogng.nomad": [
{
"type": "Base64 High Entropy String",
"filename": "nomad/core/syslogng.nomad",
"filename": "nomad/syslogng.nomad",
"hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6",
"is_verified": false,
"line_number": 159,
"line_number": 163,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "nomad/core/syslogng.nomad",
"filename": "nomad/syslogng.nomad",
"hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4",
"is_verified": false,
"line_number": 159,
"line_number": 163,
"is_secret": false
}
],
@@ -210,5 +210,5 @@
}
]
},
"generated_at": "2022-10-27T21:28:03Z"
"generated_at": "2022-07-27T03:09:38Z"
}
@@ -75,11 +75,6 @@ bootstrap-values: venv/bin/ansible galaxy
$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml

.PHONY: unseal-vault
unseal-vault: venv/bin/ansible galaxy
env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
-e "@vault-keys.json" -i ansible_hosts.yml -M ./roles ./unseal-vault.yml

.PHONY: init
init:
@terraform init
nomad/acls/acls.tf (new file, 12 lines)
@@ -0,0 +1,12 @@
resource "nomad_acl_policy" "create_post_bootstrap_policy" {
|
||||
# count = can(tobool(var.nomad_secret_id)) ? 1 : 0
|
||||
name = "anonymous"
|
||||
description = "Anon RW"
|
||||
rules_hcl = file("${path.module}/nomad-anon-bootstrap.hcl")
|
||||
}
|
||||
|
||||
resource "nomad_acl_policy" "admin" {
|
||||
name = "admin"
|
||||
description = "admin policy with access to everything"
|
||||
rules_hcl = file("${path.module}/nomad-anon-bootstrap.hcl")
|
||||
}
|
@@ -1,23 +0,0 @@
namespace "*" {
policy = "read"
}

agent {
policy = "read"
}

operator {
policy = "read"
}

quota {
policy = "read"
}

node {
policy = "read"
}

host_volume "*" {
policy = "read"
}
@@ -1,4 +0,0 @@
namespace "*" {
policy = "read"
capabilities = ["submit-job", "dispatch-job", "read-logs"]
}
@@ -1,18 +0,0 @@
resource "nomad_acl_policy" "anon_policy" {
name = "anonymous"
description = "Anon RO"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
}

resource "nomad_acl_policy" "admin" {
name = "admin"
description = "Admin RW for admins"
rules_hcl = file("${path.module}/nomad-admin-policy.hcl")
}

# TODO: (security) Limit this scope
resource "nomad_acl_policy" "deploy" {
name = "deploy"
description = "Write for job deployments"
rules_hcl = file("${path.module}/nomad-deploy-policy.hcl")
}
@ -8,33 +8,27 @@ resource "vault_nomad_secret_backend" "config" {
|
||||
backend = "nomad"
|
||||
description = "Nomad ACL"
|
||||
token = nomad_acl_token.vault.secret_id
|
||||
|
||||
default_lease_ttl_seconds = "3600"
|
||||
max_lease_ttl_seconds = "7200"
|
||||
|
||||
ttl = "3600"
|
||||
max_ttl = "7200"
|
||||
}
|
||||
|
||||
# Vault roles generating Nomad tokens
|
||||
resource "vault_nomad_secret_role" "nomad-deploy" {
|
||||
backend = vault_nomad_secret_backend.config.backend
|
||||
role = "nomad-deploy"
|
||||
# Nomad policies
|
||||
policies = ["deploy"]
|
||||
backend = vault_nomad_secret_backend.config.backend
|
||||
role = "nomad-deploy"
|
||||
policies = ["nomad-deploy"]
|
||||
}
|
||||
|
||||
resource "vault_nomad_secret_role" "admin-management" {
|
||||
resource "vault_nomad_secret_role" "admin" {
|
||||
backend = vault_nomad_secret_backend.config.backend
|
||||
role = "admin-management"
|
||||
type = "management"
|
||||
}
|
||||
|
||||
resource "vault_nomad_secret_role" "admin" {
|
||||
backend = vault_nomad_secret_backend.config.backend
|
||||
role = "admin"
|
||||
# Nomad policies
|
||||
policies = ["admin"]
|
||||
resource "vault_policy" "nomad-deploy" {
|
||||
name = "nomad-deploy"
|
||||
policy = <<EOH
|
||||
path "nomad/creds/nomad-deploy" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
||||
# Nomad Vault token access
|
||||
@ -46,3 +40,76 @@ resource "vault_token_auth_backend_role" "nomad-cluster" {
|
||||
token_period = 259200
|
||||
renewable = true
|
||||
}
|
||||
|
||||
# Policy for clusters
|
||||
resource "vault_policy" "nomad-task" {
|
||||
name = "nomad-task"
|
||||
policy = <<EOH
|
||||
# This section grants all access on "secret/*". Further restrictions can be
|
||||
# applied to this broad policy, as shown below.
|
||||
path "kv/data/*" {
|
||||
capabilities = ["create", "read", "update", "delete", "list"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
||||
# Policy for nomad tokens
|
||||
resource "vault_policy" "nomad-token" {
|
||||
name = "nomad-server"
|
||||
policy = <<EOH
|
||||
# Allow creating tokens under "nomad-cluster" token role. The token role name
|
||||
# should be updated if "nomad-cluster" is not used.
|
||||
path "auth/token/create/nomad-cluster" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow looking up "nomad-cluster" token role. The token role name should be
|
||||
# updated if "nomad-cluster" is not used.
|
||||
path "auth/token/roles/nomad-cluster" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Allow looking up the token passed to Nomad to validate # the token has the
|
||||
# proper capabilities. This is provided by the "default" policy.
|
||||
path "auth/token/lookup-self" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Allow looking up incoming tokens to validate they have permissions to access
|
||||
# the tokens they are requesting. This is only required if
|
||||
# `allow_unauthenticated` is set to false.
|
||||
path "auth/token/lookup" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow revoking tokens that should no longer exist. This allows revoking
|
||||
# tokens for dead tasks.
|
||||
path "auth/token/revoke-accessor" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow checking the capabilities of our own token. This is used to validate the
|
||||
# token upon startup.
|
||||
path "sys/capabilities-self" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow our own token to be renewed.
|
||||
path "auth/token/renew-self" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# This section grants all access on "secret/*". Further restrictions can be
|
||||
# applied to this broad policy, as shown below.
|
||||
path "kv/data/*" {
|
||||
capabilities = ["create", "read", "update", "delete", "list"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
||||
# Create a vault token for Nomad
|
||||
# resource "vault_token" "nomad-token" {
|
||||
# policies = ["nomad-server"]
|
||||
# period = "72h"
|
||||
# no_parent = true
|
||||
# }
|
||||
|
@ -10,7 +10,7 @@
|
||||
#
|
||||
# mysql {
|
||||
# # How to give access here?
|
||||
# connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
|
||||
# connection_url = "{{username}}:{{password}}@tcp(localhost:3306)"
|
||||
# username = ""
|
||||
# password = ""
|
||||
# }
|
||||
|
@ -15,3 +15,8 @@ variable "vault_token" {
|
||||
sensitive = true
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "vault_admin_password" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
@ -6,3 +6,19 @@ resource "vault_auth_backend" "userpass" {
|
||||
listing_visibility = "unauth"
|
||||
}
|
||||
}
|
||||
|
||||
resource "vault_generic_secret" "admin_user" {
|
||||
path = "auth/userpass/users/admin"
|
||||
|
||||
data_json = <<EOT
|
||||
{
|
||||
"password": "${var.vault_admin_password}",
|
||||
"policies": "admin"
|
||||
}
|
||||
EOT
|
||||
|
||||
depends_on = [
|
||||
vault_auth_backend.userpass,
|
||||
vault_policy.admin,
|
||||
]
|
||||
}
|
||||
|
nomad/acls/vault_oidc_provider.tf (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
# Create an identity for the admin user
|
||||
resource "vault_identity_entity" "admin" {
|
||||
name = "admin"
|
||||
policies = ["admin"]
|
||||
metadata = {
|
||||
email = "admin@example.com"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
vault_policy.admin,
|
||||
vault_generic_secret.admin_user,
|
||||
]
|
||||
}
|
||||
|
||||
# Tie the identity to the userpass
|
||||
resource "vault_identity_entity_alias" "admin" {
|
||||
name = "admin"
|
||||
mount_accessor = vault_auth_backend.userpass.accessor
|
||||
canonical_id = vault_identity_entity.admin.id
|
||||
}
|
||||
|
||||
# Tie the identity to a group
|
||||
resource "vault_identity_group" "admins" {
|
||||
name = "admins"
|
||||
member_entity_ids = [vault_identity_entity.admin.id]
|
||||
}
|
||||
|
||||
# Create an oidc client
|
||||
resource "vault_identity_oidc_assignment" "everyone" {
|
||||
name = "everyone"
|
||||
entity_ids = [
|
||||
vault_identity_entity.admin.id,
|
||||
]
|
||||
group_ids = [
|
||||
vault_identity_group.admins.id,
|
||||
]
|
||||
}
|
||||
|
||||
resource "vault_identity_oidc_key" "key" {
|
||||
name = "key"
|
||||
algorithm = "RS256"
|
||||
rotation_period = 3600
|
||||
verification_ttl = 7200
|
||||
allowed_client_ids = ["*"]
|
||||
}
|
||||
|
||||
resource "vault_identity_oidc_client" "consul" {
|
||||
name = "consul"
|
||||
redirect_uris = [
|
||||
"http://127.0.0.1:9200/v1/auth-methods/oidc:authenticate:callback",
|
||||
"http://127.0.0.1:8251/callback",
|
||||
"http://127.0.0.1:8080/callback"
|
||||
]
|
||||
assignments = [
|
||||
vault_identity_oidc_assignment.everyone.name
|
||||
]
|
||||
key = vault_identity_oidc_key.key.name
|
||||
id_token_ttl = 2400
|
||||
access_token_ttl = 7200
|
||||
}
|
@ -7,77 +7,3 @@ path "*" {
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "vault_policy" "nomad-deploy" {
|
||||
name = "nomad-deploy"
|
||||
policy = <<EOH
|
||||
path "nomad/creds/nomad-deploy" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
||||
# Policy for clusters
|
||||
resource "vault_policy" "nomad-task" {
|
||||
name = "nomad-task"
|
||||
policy = <<EOH
|
||||
path "kv/data/*" {
|
||||
# Does this need create, update, delete?
|
||||
capabilities = ["create", "read", "update", "delete", "list"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
||||
# Policy for nomad tokens
|
||||
resource "vault_policy" "nomad-server" {
|
||||
name = "nomad-server"
|
||||
policy = <<EOH
|
||||
# Allow creating tokens under "nomad-cluster" token role. The token role name
|
||||
# should be updated if "nomad-cluster" is not used.
|
||||
path "auth/token/create/nomad-cluster" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow looking up "nomad-cluster" token role. The token role name should be
|
||||
# updated if "nomad-cluster" is not used.
|
||||
path "auth/token/roles/nomad-cluster" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Allow looking up the token passed to Nomad to validate # the token has the
|
||||
# proper capabilities. This is provided by the "default" policy.
|
||||
path "auth/token/lookup-self" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Allow looking up incoming tokens to validate they have permissions to access
|
||||
# the tokens they are requesting. This is only required if
|
||||
# `allow_unauthenticated` is set to false.
|
||||
path "auth/token/lookup" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow revoking tokens that should no longer exist. This allows revoking
|
||||
# tokens for dead tasks.
|
||||
path "auth/token/revoke-accessor" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow checking the capabilities of our own token. This is used to validate the
|
||||
# token upon startup.
|
||||
path "sys/capabilities-self" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# Allow our own token to be renewed.
|
||||
path "auth/token/renew-self" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
|
||||
# This section grants all access on "secret/*". Further restrictions can be
|
||||
# applied to this broad policy, as shown below.
|
||||
path "kv/data/*" {
|
||||
capabilities = ["create", "read", "update", "delete", "list"]
|
||||
}
|
||||
EOH
|
||||
}
|
||||
|
@ -13,14 +13,7 @@ all:
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: lldap-data
|
||||
path: /srv/volumes/lldap
|
||||
owner: "root"
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
n2.thefij:
|
||||
nomad_node_class: ingress
|
||||
nomad_node_role: both
|
||||
nomad_unique_host_volumes:
|
||||
- name: nextcloud-data
|
||||
@ -35,25 +28,15 @@ all:
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: sonarr-data
|
||||
path: /srv/volumes/sonarr
|
||||
- name: authentik-data
|
||||
path: /srv/volumes/gitea
|
||||
owner: "root"
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: nzbget-data
|
||||
path: /srv/volumes/nzbget
|
||||
owner: "root"
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
# n3.thefij:
|
||||
# nomad_node_class: ingress
|
||||
# nomad_node_role: both
|
||||
# pi3:
|
||||
# nomad_node_role: client
|
||||
# pi4:
|
||||
# nomad_node_role: client
|
||||
n3.thefij:
|
||||
nomad_node_class: ingress
|
||||
nomad_node_role: both
|
||||
|
||||
consul_instances:
|
||||
children:
|
||||
|
nomad/backups/backup.nomad (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
variable "nextcloud_backup" {
|
||||
type = string
|
||||
description = "HCL config for Restic Scheduler jobs"
|
||||
}
|
||||
|
||||
variable "consul_backup" {
|
||||
type = string
|
||||
description = "HCL config for Restic Scheduler jobs"
|
||||
}
|
||||
|
||||
job "backup" {
|
||||
datacenters = ["dc1"]
|
||||
type = "system"
|
||||
|
||||
constraint {
|
||||
attribute = "${node.unique.name}"
|
||||
# Only node with a backup job so far
|
||||
# Remove when backing up all nodes
|
||||
value = "n2"
|
||||
}
|
||||
|
||||
group "backup" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "metrics" {
|
||||
to = 8080
|
||||
}
|
||||
}
|
||||
|
||||
volume "all-volumes" {
|
||||
type = "host"
|
||||
read_only = true
|
||||
source = "all-volumes"
|
||||
}
|
||||
|
||||
service {
|
||||
port = "metrics"
|
||||
|
||||
# Add connect to mysql
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 8080
|
||||
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
local_bind_port = 6060
|
||||
}
|
||||
|
||||
config {
|
||||
protocol = "tcp"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
meta {
|
||||
metrics_addr = "${NOMAD_ADDR_metrics}"
|
||||
}
|
||||
}
|
||||
|
||||
task "backup" {
|
||||
driver = "docker"
|
||||
|
||||
volume_mount {
|
||||
volume = "all-volumes"
|
||||
destination = "/data"
|
||||
read_only = true
|
||||
}
|
||||
|
||||
config {
|
||||
image = "iamthefij/resticscheduler"
|
||||
ports = ["metrics"]
|
||||
args = [
|
||||
"/jobs/node-jobs.hcl",
|
||||
]
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "/jobs"
|
||||
source = "jobs"
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
env = {
|
||||
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
|
||||
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
|
||||
}
|
||||
|
||||
template {
|
||||
# Probably want to use database credentials that have access to dump all tables
|
||||
data = <<EOF
|
||||
{{ with secret "kv/data/nextcloud" }}
|
||||
MYSQL_DATABASE={{ .Data.data.db_name }}
|
||||
MYSQL_USER={{ .Data.data.db_user }}
|
||||
MYSQL_PASSWORD={{ .Data.data.db_pass }}
|
||||
{{ end }}
|
||||
{{ with secret "kv/data/backups" }}
|
||||
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
|
||||
{{ end }}
|
||||
EOF
|
||||
destination = "secrets/db.env"
|
||||
env = true
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
|
||||
EOH
|
||||
destination = "local/consul.env"
|
||||
env = true
|
||||
}
|
||||
|
||||
|
||||
template {
|
||||
# Build jobs based on node
|
||||
data = <<EOF
|
||||
# Current node is {{ env "node.unique.name" }}
|
||||
# Consul backup below?
|
||||
{{ if eq (env "node.unique.name") "n2" -}}
|
||||
# Consul backup
|
||||
${var.consul_backup}
|
||||
{{ end -}}
|
||||
{{ range service "nextcloud" }}
|
||||
# Nextcloud .Node {{ .Node }}
|
||||
{{ if eq .Node (env "node.unique.name") }}
|
||||
${var.nextcloud_backup}
|
||||
{{ end }}{{ end }}
|
||||
EOF
|
||||
destination = "jobs/node-jobs.hcl"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 256
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
nomad/backups/backups.tf (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
locals {
|
||||
nextcloud_backup = file("${path.module}/jobs/nextcloud.hcl")
|
||||
}
|
||||
|
||||
resource "nomad_job" "backups" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
"nextcloud_backup" = "${local.nextcloud_backup}",
|
||||
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/backup.nomad")
|
||||
}
|
||||
|
||||
resource "nomad_job" "backups-oneoff" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
"nextcloud_backup" = "${local.nextcloud_backup}",
|
||||
"consul_backup" = file("${path.module}/jobs/consul.hcl"),
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/oneoff.nomad")
|
||||
}
|
@ -1,11 +1,23 @@
|
||||
job "Consul" {
|
||||
schedule = "0 * * * *"
|
||||
schedule = "* * * * *"
|
||||
|
||||
config {
|
||||
repo = "rclone::ftp,env_auth:/nomad/consul"
|
||||
# TODO: Backup to a meaningful location, this is just for testing
|
||||
repo = "/local/repo"
|
||||
# Read from secret file
|
||||
passphrase = env("BACKUP_PASSPHRASE")
|
||||
}
|
||||
|
||||
# Remove when using a proper backup destination
|
||||
task "Create dir for repo" {
|
||||
pre_script {
|
||||
on_backup = "echo 'Backing up something'"
|
||||
}
|
||||
pre_script {
|
||||
on_backup = "mkdir -p /local/repo"
|
||||
}
|
||||
}
|
||||
|
||||
task "Use consul snapshots" {
|
||||
pre_script {
|
||||
on_backup = "mkdir -p /local/consul"
|
@ -1,11 +1,23 @@
|
||||
job "Nextcloud" {
|
||||
schedule = "0 * * * *"
|
||||
schedule = "* * * * *"
|
||||
|
||||
config {
|
||||
repo = "rclone::ftp,env_auth:/nomad/nextcloud"
|
||||
# TODO: Backup to a meaningful location, this is just for testing
|
||||
repo = "/local/repo"
|
||||
# Read from secret file
|
||||
passphrase = env("BACKUP_PASSPHRASE")
|
||||
}
|
||||
|
||||
# Remove when using a proper backup destination
|
||||
task "Create dir for repo" {
|
||||
pre_script {
|
||||
on_backup = "echo 'Backing up something'"
|
||||
}
|
||||
pre_script {
|
||||
on_backup = "mkdir -p /local/repo"
|
||||
}
|
||||
}
|
||||
|
||||
mysql "Backup database" {
|
||||
hostname = env("MYSQL_HOST")
|
||||
port = env("MYSQL_PORT")
|
@ -1,8 +1,15 @@
|
||||
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
variable "nextcloud_backup" {
|
||||
type = string
|
||||
description = "HCL config for Restic Scheduler jobs"
|
||||
}
|
||||
|
||||
variable "consul_backup" {
|
||||
type = string
|
||||
description = "HCL config for Restic Scheduler jobs"
|
||||
}
|
||||
|
||||
job "backup-oneoff-n2" {
|
||||
datacenters = ["dc1"]
|
||||
%{ if batch_node == null ~}
|
||||
type = "system"
|
||||
%{ else ~}
|
||||
type = "batch"
|
||||
|
||||
parameterized {
|
||||
@ -15,21 +22,14 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
task = "backup"
|
||||
snapshot = "latest"
|
||||
}
|
||||
%{ endif ~}
|
||||
|
||||
%{ if batch_node == null ~}
|
||||
|
||||
constraint {
|
||||
attribute = "$${node.unique.name}"
|
||||
operator = "set_contains_any"
|
||||
# Only deploy to nodes running tasks to backup
|
||||
value = "n1,n2"
|
||||
attribute = "${node.unique.name}"
|
||||
# Only node with a backup job so far
|
||||
# Remove when backing up all nodes
|
||||
value = "n2"
|
||||
}
|
||||
%{ else ~}
|
||||
constraint {
|
||||
attribute = "$${node.unique.name}"
|
||||
value = "${batch_node}"
|
||||
}
|
||||
%{ endif ~}
|
||||
|
||||
group "backup" {
|
||||
|
||||
@ -43,12 +43,11 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
|
||||
volume "all-volumes" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
read_only = true
|
||||
source = "all-volumes"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "backups"
|
||||
port = "metrics"
|
||||
|
||||
# Add connect to mysql
|
||||
@ -77,7 +76,7 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
}
|
||||
|
||||
meta {
|
||||
metrics_addr = "$${NOMAD_ADDR_metrics}"
|
||||
metrics_addr = "${NOMAD_ADDR_metrics}"
|
||||
}
|
||||
}
|
||||
|
||||
@ -87,18 +86,17 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
volume_mount {
|
||||
volume = "all-volumes"
|
||||
destination = "/data"
|
||||
read_only = false
|
||||
read_only = true
|
||||
}
|
||||
|
||||
config {
|
||||
image = "iamthefij/resticscheduler"
|
||||
ports = ["metrics"]
|
||||
args = [
|
||||
%{ if batch_node != null ~}
|
||||
"-once",
|
||||
"-$${NOMAD_META_task}",
|
||||
"$${NOMAD_META_job_name}",
|
||||
%{ endif ~}
|
||||
"-${NOMAD_META_task}",
|
||||
"${NOMAD_META_job_name}",
|
||||
# TODO: add restore arg here
|
||||
"/jobs/node-jobs.hcl",
|
||||
]
|
||||
|
||||
@ -117,26 +115,21 @@ job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
|
||||
}
|
||||
|
||||
env = {
|
||||
"MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
|
||||
"MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
|
||||
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
|
||||
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
|
||||
}
|
||||
|
||||
template {
|
||||
# Probably want to use database credentials that have access to dump all tables
|
||||
data = <<EOF
|
||||
{{ with secret "kv/data/nextcloud" -}}
|
||||
{{ with secret "kv/data/nextcloud" }}
|
||||
MYSQL_DATABASE={{ .Data.data.db_name }}
|
||||
MYSQL_USER={{ .Data.data.db_user }}
|
||||
MYSQL_PASSWORD={{ .Data.data.db_pass }}
|
||||
{{ end -}}
|
||||
{{ with secret "kv/data/backups" -}}
|
||||
{{ end }}
|
||||
{{ with secret "kv/data/backups" }}
|
||||
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
|
||||
RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
|
||||
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
|
||||
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
|
||||
RCLONE_FTP_EXPLICIT_TLS=true
|
||||
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
|
||||
{{ end -}}
|
||||
{{ end }}
|
||||
EOF
|
||||
destination = "secrets/db.env"
|
||||
env = true
|
||||
@ -155,38 +148,16 @@ CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
|
||||
# Build jobs based on node
|
||||
data = <<EOF
|
||||
# Current node is {{ env "node.unique.name" }}
|
||||
# Consul backup below?
|
||||
{{ if eq (env "node.unique.name") "n2" -}}
|
||||
# Consul backup
|
||||
${file("${module_path}/jobs/consul.hcl")}
|
||||
${var.consul_backup}
|
||||
{{ end -}}
|
||||
|
||||
{{ range service "nextcloud" -}}
|
||||
{{ range service "nextcloud" }}
|
||||
# Nextcloud .Node {{ .Node }}
|
||||
{{ if eq .Node (env "node.unique.name") -}}
|
||||
${file("${module_path}/jobs/nextcloud.hcl")}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
|
||||
{{ range service "lldap" -}}
|
||||
# Lldap .Node {{ .Node }}
|
||||
{{ if eq .Node (env "node.unique.name") -}}
|
||||
${file("${module_path}/jobs/lldap.hcl")}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
|
||||
{{ range service "sonarr" -}}
|
||||
# Lldap .Node {{ .Node }}
|
||||
{{ if eq .Node (env "node.unique.name") -}}
|
||||
${file("${module_path}/jobs/sonarr.hcl")}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
|
||||
{{ range service "nzbget" -}}
|
||||
# Lldap .Node {{ .Node }}
|
||||
{{ if eq .Node (env "node.unique.name") -}}
|
||||
${file("${module_path}/jobs/nzbget.hcl")}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ if eq .Node (env "node.unique.name") }}
|
||||
${var.nextcloud_backup}
|
||||
{{ end }}{{ end }}
|
||||
EOF
|
||||
destination = "jobs/node-jobs.hcl"
|
||||
}
|
@ -4,7 +4,6 @@
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.16"
|
||||
hashes = [
|
||||
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
|
||||
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
|
||||
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
|
||||
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
|
@ -23,7 +23,6 @@ job "blocky" {
|
||||
}
|
||||
|
||||
port "api" {
|
||||
host_network = "loopback"
|
||||
to = "4000"
|
||||
}
|
||||
}
|
||||
@ -69,8 +68,7 @@ job "blocky" {
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
memory_max = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -108,7 +106,6 @@ job "blocky" {
|
||||
template {
|
||||
data = var.config_data
|
||||
destination = "app/config.yml"
|
||||
splay = "1m"
|
||||
}
|
||||
}
|
||||
}
|
@ -17,15 +17,6 @@ upstream:
|
||||
- https://dns10.quad9.net/dns-query
|
||||
- tcp-tls:dns10.quad9.net
|
||||
|
||||
conditional:
|
||||
mapping:
|
||||
home.arpa: 192.168.2.1
|
||||
in-addr.arpa: 192.168.2.1
|
||||
iot: 192.168.2.1
|
||||
local: 192.168.2.1
|
||||
thefij: 192.168.2.1
|
||||
.: 192.168.2.1
|
||||
|
||||
blocking:
|
||||
blackLists:
|
||||
ads:
|
@ -6,7 +6,6 @@
|
||||
vars_files:
|
||||
- consul_values.yml
|
||||
|
||||
tasks:
|
||||
- name: Add values
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
@ -16,6 +15,12 @@
|
||||
name: python-consul
|
||||
extra_args: --index-url https://pypi.org/simple
|
||||
|
||||
- name: Set hostname
|
||||
consul_kv:
|
||||
host: "{{ inventory_hostname }}"
|
||||
key: global/base_hostname
|
||||
value: dev.homelab
|
||||
|
||||
- name: Write values
|
||||
consul_kv:
|
||||
host: "{{ inventory_hostname }}"
|
||||
@ -40,25 +45,10 @@
|
||||
name: hvac
|
||||
extra_args: --index-url https://pypi.org/simple
|
||||
|
||||
- name: Check mount
|
||||
community.hashi_vault.vault_read:
|
||||
url: "http://{{ inventory_hostname }}:8200"
|
||||
token: "{{ root_token }}"
|
||||
path: "/sys/mounts/kv"
|
||||
ignore_errors: true
|
||||
register: check_mount
|
||||
|
||||
- name: Create kv mount
|
||||
community.hashi_vault.vault_write:
|
||||
url: "http://{{ inventory_hostname }}:8200"
|
||||
token: "{{ root_token }}"
|
||||
path: "/sys/mounts/kv"
|
||||
data:
|
||||
type: kv-v2
|
||||
when: check_mount is not succeeded
|
||||
|
||||
# This fails on first run because `root_token` isn't found
|
||||
# Fails after that too because the kv/ space has not been created yet either! Oh noes!
|
||||
# Maybe move data bootstrapping to after the cluster is bootstrapped
|
||||
- name: Write values
|
||||
no_log: true
|
||||
community.hashi_vault.vault_write:
|
||||
url: "http://{{ inventory_hostname }}:8200"
|
||||
token: "{{ root_token }}"
|
||||
@ -67,11 +57,8 @@
|
||||
data:
|
||||
"{{ item.value }}"
|
||||
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
|
||||
retries: 2
|
||||
delay: 10
|
||||
|
||||
- name: Write userpass
|
||||
no_log: true
|
||||
community.hashi_vault.vault_write:
|
||||
url: "http://{{ inventory_hostname }}:8200"
|
||||
token: "{{ root_token }}"
|
||||
|
@ -1,64 +0,0 @@
|
||||
---
|
||||
- name: Delete Consul data
|
||||
hosts: consul_instances
|
||||
|
||||
tasks:
|
||||
- name: Stop consul
|
||||
systemd:
|
||||
name: consul
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
- name: Stop vault
|
||||
systemd:
|
||||
name: consul
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
- name: Remove data dir
|
||||
file:
|
||||
path: /opt/consul
|
||||
state: absent
|
||||
become: true
|
||||
|
||||
- name: Delete Nomad data
|
||||
hosts: nomad_instances
|
||||
|
||||
tasks:
|
||||
- name: Stop nomad
|
||||
systemd:
|
||||
name: nomad
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
- name: Kill nomad
|
||||
shell:
|
||||
cmd: systemctl kill nomad
|
||||
become: true
|
||||
|
||||
- name: Stop all containers
|
||||
shell:
|
||||
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
|
||||
become: true
|
||||
|
||||
- name: Remove all containers
|
||||
shell:
|
||||
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
|
||||
become: true
|
||||
|
||||
- name: Unmount secrets
|
||||
shell:
|
||||
cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
|
||||
become: true
|
||||
|
||||
- name: Remove data dir
|
||||
file:
|
||||
path: /var/nomad
|
||||
state: absent
|
||||
become: true
|
||||
|
||||
- name: Remove data dir
|
||||
file:
|
||||
path: /opt/nomad/data
|
||||
state: absent
|
||||
become: true
|
nomad/core.tf (115 lines changed)
@@ -2,11 +2,116 @@ module "databases" {
|
||||
source = "./databases"
|
||||
}
|
||||
|
||||
module "core" {
|
||||
source = "./core"
|
||||
module "blocky" {
|
||||
source = "./blocky"
|
||||
|
||||
base_hostname = var.base_hostname
|
||||
|
||||
# Metrics and Blocky depend on databases
|
||||
depends_on = [module.databases]
|
||||
depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "traefik" {
|
||||
source = "./traefik"
|
||||
|
||||
consul_address = var.consul_address
|
||||
base_hostname = var.base_hostname
|
||||
}
|
||||
|
||||
module "metrics" {
|
||||
source = "./metrics"
|
||||
|
||||
consul_address = var.consul_address
|
||||
}
|
||||
|
||||
module "loki" {
|
||||
source = "./levant"
|
||||
|
||||
template_path = "service.nomad"
|
||||
variables = {
|
||||
name = "loki"
|
||||
image = "grafana/loki:2.2.1"
|
||||
service_port = 3100
|
||||
ingress = true
|
||||
sticky_disk = true
|
||||
healthcheck = "/ready"
|
||||
templates = jsonencode([
|
||||
{
|
||||
data = file("./loki-config.yml")
|
||||
dest = "/etc/loki/local-config.yaml"
|
||||
}
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "loki_intent" {
|
||||
name = "loki"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "grafana"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "promtail"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "syslogng-promtail"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
resource "nomad_job" "syslog-ng" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/syslogng.nomad")
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "syslogng_promtail_intent" {
|
||||
name = "syslogng-promtail"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "syslogng"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "global_access" {
|
||||
name = "*"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "traefik"
|
||||
Precedence = 6
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "deny"
|
||||
Name = "*"
|
||||
Precedence = 5
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
@ -1,59 +0,0 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/consul" {
|
||||
version = "2.16.2"
|
||||
hashes = [
|
||||
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
|
||||
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
|
||||
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
|
||||
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
|
||||
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
|
||||
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
|
||||
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
|
||||
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
|
||||
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
|
||||
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
|
||||
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
|
||||
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
|
||||
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/external" {
|
||||
version = "2.2.2"
|
||||
hashes = [
|
||||
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
|
||||
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
|
||||
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
|
||||
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
|
||||
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
|
||||
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
|
||||
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
|
||||
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
|
||||
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
|
||||
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
|
||||
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.19"
|
||||
hashes = [
|
||||
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
|
||||
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
|
||||
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
|
||||
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
|
||||
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
|
||||
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
|
||||
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
|
||||
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
|
||||
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
|
||||
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
|
||||
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
|
||||
]
|
||||
}
|
@ -1,55 +0,0 @@
|
||||
job "ddclient" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
group "ddclient" {
|
||||
|
||||
task "ddclient" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "linuxserver/ddclient:3.9.1"
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "secrets/ddclient.conf"
|
||||
target = "/config/ddclient.conf"
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
daemon=900
|
||||
ssl=yes
|
||||
use=web
|
||||
|
||||
protocol=cloudflare,
|
||||
zone={{ key "ddclient/zone" }},
|
||||
ttl=1,
|
||||
{{ with secret "kv/data/cloudflare" -}}
|
||||
login={{ .Data.data.api_user }},
|
||||
password={{ .Data.data.api_key }}
|
||||
# login=token,
|
||||
# password={{ .Data.data.api_token_dns_edit_all }}
|
||||
{{ end -}}
|
||||
|
||||
{{ key "ddclient/domain" }}
|
||||
EOH
|
||||
destination = "secrets/ddclient.conf"
|
||||
change_mode = "restart"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
memory_max = 100
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,140 +0,0 @@
|
||||
job "lldap" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
group "lldap" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "web" {
|
||||
host_network = "loopback"
|
||||
to = 17170
|
||||
}
|
||||
|
||||
port "ldap" {
|
||||
host_network = "loopback"
|
||||
to = 3890
|
||||
}
|
||||
}
|
||||
|
||||
volume "lldap-data" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "lldap-data"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "lldap"
|
||||
port = "ldap"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 3890
|
||||
|
||||
config {
|
||||
protocol = "tcp"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "ldap-admin"
|
||||
port = "web"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 17170
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 20
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.ldap-admin.entryPoints=websecure",
|
||||
]
|
||||
}
|
||||
|
||||
task "lldap" {
|
||||
driver = "docker"
|
||||
|
||||
volume_mount {
|
||||
volume = "lldap-data"
|
||||
destination = "/data"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
config {
|
||||
image = "nitnelave/lldap"
|
||||
ports = ["ldap", "web"]
|
||||
args = ["run", "--config-file", "/lldap_config.toml"]
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "secrets/lldap_config.toml"
|
||||
target = "/lldap_config.toml"
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
database_url = "sqlite:///data/users.db?mode=rwc"
|
||||
key_file = "/data/private_key"
|
||||
ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
|
||||
{{ with secret "kv/data/lldap" -}}
|
||||
jwt_secret = "{{ .Data.data.jwt_secret }}"
|
||||
ldap_user_dn = "{{ .Data.data.admin_user }}"
|
||||
ldap_user_email = "{{ .Data.data.admin_email }}"
|
||||
ldap_user_pass = "{{ .Data.data.admin_password }}"
|
||||
{{ end -}}
|
||||
{{ with secret "kv/data/smtp" -}}
|
||||
[smtp_options]
|
||||
enable_password_reset = true
|
||||
server = "{{ .Data.data.server }}"
|
||||
port = {{ .Data.data.port }}
|
||||
tls_required = {{ .Data.data.tls }}
|
||||
user = "{{ .Data.data.user }}"
|
||||
password = "{{ .Data.data.password }}"
|
||||
{{ with secret "kv/data/lldap" -}}
|
||||
from = "{{ .Data.data.smtp_from }}"
|
||||
reply_to = "{{ .Data.data.smtp_reply_to }}"
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
EOH
|
||||
destination = "secrets/lldap_config.toml"
|
||||
change_mode = "restart"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 10
|
||||
memory = 20
|
||||
memory_max = 100
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,134 +0,0 @@
|
||||
|
||||
module "blocky" {
|
||||
source = "./blocky"
|
||||
|
||||
base_hostname = var.base_hostname
|
||||
# Not in this module
|
||||
# depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "traefik" {
|
||||
source = "./traefik"
|
||||
|
||||
base_hostname = var.base_hostname
|
||||
}
|
||||
|
||||
module "nomad_login" {
|
||||
source = "../levant"
|
||||
|
||||
template_path = "service.nomad"
|
||||
variables = {
|
||||
name = "nomad-login"
|
||||
image = "iamthefij/nomad-vault-login"
|
||||
service_port = 5000
|
||||
ingress = true
|
||||
ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
|
||||
env = jsonencode({
|
||||
VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
module "metrics" {
|
||||
source = "./metrics"
|
||||
# Not in this module
|
||||
# depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "loki" {
|
||||
source = "../levant"
|
||||
|
||||
template_path = "service.nomad"
|
||||
variables = {
|
||||
name = "loki"
|
||||
image = "grafana/loki:2.2.1"
|
||||
service_port = 3100
|
||||
ingress = true
|
||||
sticky_disk = true
|
||||
healthcheck = "/ready"
|
||||
templates = jsonencode([
|
||||
{
|
||||
data = file("${path.module}/loki-config.yml")
|
||||
dest = "/etc/loki/local-config.yaml"
|
||||
}
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "loki_intent" {
|
||||
name = "loki"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "grafana"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "promtail"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "syslogng-promtail"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
resource "nomad_job" "syslog-ng" {
|
||||
jobspec = file("${path.module}/syslogng.nomad")
|
||||
}
|
||||
|
||||
resource "nomad_job" "ddclient" {
|
||||
jobspec = file("${path.module}/ddclient.nomad")
|
||||
}
|
||||
|
||||
resource "nomad_job" "lldap" {
|
||||
jobspec = file("${path.module}/lldap.nomad")
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "syslogng_promtail_intent" {
|
||||
name = "syslogng-promtail"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "syslogng"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "global_access" {
|
||||
name = "*"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "traefik"
|
||||
Precedence = 6
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "deny"
|
||||
Name = "*"
|
||||
Precedence = 5
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
@ -1,150 +0,0 @@
|
||||
job "metrics" {
|
||||
datacenters = ["dc1"]
|
||||
type = "system"
|
||||
|
||||
group "promtail" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "promtail" {
|
||||
to = 9080
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "promtail"
|
||||
port = "promtail"
|
||||
|
||||
meta {
|
||||
metrics_addr = "${NOMAD_ADDR_promtail}"
|
||||
nomad_dc = "${NOMAD_DC}"
|
||||
nomad_node_name = "${node.unique.name}"
|
||||
}
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 9080
|
||||
|
||||
upstreams {
|
||||
destination_name = "loki"
|
||||
local_bind_port = 1000
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/metrics"
|
||||
port = "promtail"
|
||||
interval = "10s"
|
||||
timeout = "10s"
|
||||
}
|
||||
}
|
||||
|
||||
task "promtail" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "grafana/promtail:2.2.1"
|
||||
args = ["-config.file=/etc/promtail/promtail.yml"]
|
||||
ports = ["promtail"]
|
||||
|
||||
# Mount config
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "/etc/promtail/promtail.yml"
|
||||
source = "local/promtail.yml"
|
||||
}
|
||||
|
||||
# Bind mount host machine-id and log directories
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/etc/machine-id"
|
||||
target = "/etc/machine-id"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/var/log/journal/"
|
||||
target = "/var/log/journal/"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/run/log/journal/"
|
||||
target = "/run/log/journal/"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
# mount {
|
||||
# type = "bind"
|
||||
# source = "/var/log/audit"
|
||||
# target = "/var/log/audit"
|
||||
# readonly = true
|
||||
# }
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
---
|
||||
server:
|
||||
http_listen_address: 0.0.0.0
|
||||
http_listen_port: 9080
|
||||
|
||||
clients:
|
||||
# loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
|
||||
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
|
||||
- job_name: journal
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: systemd-journal
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: unit
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: hostname
|
||||
- source_labels: ['__journal__transport']
|
||||
target_label: journal_transport
|
||||
# Docker log labels
|
||||
- source_labels: ['__journal_syslog_identifier']
|
||||
target_label: syslog_identifier
|
||||
- source_labels: ['__journal_image_name']
|
||||
target_label: docker_image_name
|
||||
- source_labels: ['__journal_container_name']
|
||||
target_label: docker_container_name
|
||||
- source_labels: ['__journal_container_id']
|
||||
target_label: docker_container_id
|
||||
- source_labels: ['__journal_com_docker_compose_project']
|
||||
target_label: docker_compose_project
|
||||
- source_labels: ['__journal_com_docker_compose_service']
|
||||
target_label: docker_compose_service
|
||||
EOF
|
||||
destination = "local/promtail.yml"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@ -1,21 +0,0 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.17"
|
||||
hashes = [
|
||||
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
|
||||
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
|
||||
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
|
||||
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
|
||||
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
|
||||
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
|
||||
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
|
||||
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
|
||||
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
|
||||
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
|
||||
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
|
||||
]
|
||||
}
|
@ -1,302 +0,0 @@
|
||||
variable "base_hostname" {
|
||||
type = string
|
||||
description = "Base hostname to serve content from"
|
||||
default = "dev.homelab"
|
||||
}
|
||||
|
||||
job "traefik" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
priority = 100
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
value = "ingress"
|
||||
}
|
||||
|
||||
constraint {
|
||||
distinct_hosts = true
|
||||
}
|
||||
|
||||
update {
|
||||
max_parallel = 1
|
||||
# canary = 1
|
||||
# auto_promote = true
|
||||
auto_revert = true
|
||||
}
|
||||
|
||||
group "traefik" {
|
||||
count = 1
|
||||
|
||||
network {
|
||||
port "web" {
|
||||
static = 80
|
||||
}
|
||||
|
||||
port "websecure" {
|
||||
static = 443
|
||||
}
|
||||
|
||||
port "syslog" {
|
||||
static = 514
|
||||
}
|
||||
}
|
||||
|
||||
ephemeral_disk {
|
||||
migrate = true
|
||||
sticky = true
|
||||
}
|
||||
|
||||
service {
|
||||
name = "traefik"
|
||||
port = "web"
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/ping"
|
||||
port = "web"
|
||||
interval = "10s"
|
||||
timeout = "2s"
|
||||
}
|
||||
|
||||
connect {
|
||||
native = true
|
||||
}
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.traefik.entryPoints=websecure",
|
||||
"traefik.http.routers.traefik.service=api@internal",
|
||||
]
|
||||
}
|
||||
|
||||
task "traefik" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "traefik:2.6"
|
||||
|
||||
ports = ["web", "websecure"]
|
||||
network_mode = "host"
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "/etc/traefik"
|
||||
source = "local/config"
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "/etc/traefik/usersfile"
|
||||
source = "secrets/usersfile"
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = ["access-tables", "nomad-task"]
|
||||
}
|
||||
|
||||
template {
|
||||
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
|
||||
left_delimiter = "<<"
|
||||
right_delimiter = ">>"
|
||||
data = <<EOH
|
||||
[log]
|
||||
level = "DEBUG"
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.web]
|
||||
address = ":80"
|
||||
[entryPoints.web.http]
|
||||
[entryPoints.web.http.redirections]
|
||||
[entryPoints.web.http.redirections.entrypoint]
|
||||
to = "websecure"
|
||||
scheme = "https"
|
||||
|
||||
[entryPoints.websecure]
|
||||
address = ":443"
|
||||
[entryPoints.websecure.http.tls]
|
||||
<< if keyExists "traefik/acme/email" ->>
|
||||
certResolver = "letsEncrypt"
|
||||
[[entryPoints.websecure.http.tls.domains]]
|
||||
main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
|
||||
<< end ->>
|
||||
|
||||
[entryPoints.metrics]
|
||||
address = ":8989"
|
||||
|
||||
[entryPoints.syslogtcp]
|
||||
address = ":514"
|
||||
|
||||
[entryPoints.syslogudp]
|
||||
address = ":514/udp"
|
||||
|
||||
[api]
|
||||
dashboard = true
|
||||
|
||||
[ping]
|
||||
entrypoint = "web"
|
||||
|
||||
[metrics]
|
||||
[metrics.prometheus]
|
||||
entrypoint = "metrics"
|
||||
# manualRouting = true
|
||||
|
||||
[providers.file]
|
||||
directory = "/etc/traefik/conf"
|
||||
watch = true
|
||||
|
||||
[providers.consulCatalog]
|
||||
connectAware = true
|
||||
connectByDefault = true
|
||||
exposedByDefault = false
|
||||
defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
|
||||
[providers.consulCatalog.endpoint]
|
||||
address = "http://<< env "CONSUL_HTTP_ADDR" >>"
|
||||
|
||||
<< if keyExists "traefik/acme/email" ->>
|
||||
[certificatesResolvers.letsEncrypt.acme]
|
||||
email = "<< key "traefik/acme/email" >>"
|
||||
# Store in /local because /secrets doesn't persist with ephemeral disk
|
||||
storage = "/local/acme.json"
|
||||
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
|
||||
provider = "cloudflare"
|
||||
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
|
||||
delayBeforeCheck = 0
|
||||
<< end ->>
|
||||
EOH
|
||||
destination = "local/config/traefik.toml"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "kv/data/cloudflare" }}
|
||||
CF_DNS_API_TOKEN={{ .Data.data.api_token_dns_edit }}
|
||||
CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "secrets/cloudflare.env"
|
||||
env = true
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
[http]
|
||||
[http.routers]
|
||||
[http.routers.nomad]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "nomad"
|
||||
rule = "Host(`nomad.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
[http.routers.consul]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "consul"
|
||||
rule = "Host(`consul.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
[http.routers.vault]
|
||||
entryPoints = ["websecure"]
|
||||
# middlewares = []
|
||||
service = "vault"
|
||||
rule = "Host(`vault.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
|
||||
|
||||
[http.services]
|
||||
{{ with service "nomad-client" -}}
|
||||
[http.services.nomad]
|
||||
[http.services.nomad.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[http.services.nomad.loadBalancer.servers]]
|
||||
url = "http://{{ .Address }}:{{ .Port }}"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{ with service "consul" -}}
|
||||
[http.services.consul]
|
||||
[http.services.consul.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[http.services.consul.loadBalancer.servers]]
|
||||
# Not using .Port because that's an RPC port
|
||||
url = "http://{{ .Address }}:8500"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{ with service "vault" -}}
|
||||
[http.services.vault]
|
||||
[http.services.vault.loadBalancer]
|
||||
[http.services.vault.loadBalancer.sticky.cookie]
|
||||
{{ range . -}}
|
||||
[[http.services.vault.loadBalancer.servers]]
|
||||
url = "http://{{ .Address }}:{{ .Port }}"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
EOH
|
||||
destination = "local/config/conf/route-hashi.toml"
|
||||
change_mode = "noop"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with service "syslogng" -}}
|
||||
[tcp.routers]
|
||||
[tcp.routers.syslogtcp]
|
||||
entryPoints = ["syslogtcp"]
|
||||
service = "syslogngtcp"
|
||||
rule = "HostSNI(`*`)"
|
||||
|
||||
[tcp.services]
|
||||
[tcp.services.syslogngtcp]
|
||||
[tcp.services.syslogngtcp.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[tcp.services.syslogngtcp.loadBalancer.servers]]
|
||||
address = "{{ .Address }}:{{ .Port }}"
|
||||
{{ end -}}
|
||||
{{ end }}
|
||||
|
||||
{{ with service "syslogng" -}}
|
||||
[udp.routers]
|
||||
[udp.routers.syslogudp]
|
||||
entryPoints = ["syslogudp"]
|
||||
service = "syslogngudp"
|
||||
|
||||
[udp.services]
|
||||
[udp.services.syslogngudp]
|
||||
[udp.services.syslogngudp.loadBalancer]
|
||||
{{ range . -}}
|
||||
[[udp.services.syslogngudp.loadBalancer.servers]]
|
||||
address = "{{ .Address }}:{{ .Port }}"
|
||||
{{ end -}}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "local/config/conf/route-syslog-ng.toml"
|
||||
change_mode = "noop"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
[http.middlewares]
|
||||
{{ with secret "kv/data/traefik" }}
|
||||
{{ if .Data.data.usersfile }}
|
||||
[http.middlewares.basic-auth.basicAuth]
|
||||
usersFile = "/etc/traefik/usersfile"
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "local/config/conf/middlewares.toml"
|
||||
change_mode = "noop"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "kv/data/traefik" }}
|
||||
{{ .Data.data.usersfile }}
|
||||
{{ end }}
|
||||
EOH
|
||||
destination = "secrets/usersfile"
|
||||
change_mode = "noop"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 100
|
||||
memory = 100
|
||||
memory_max = 500
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,16 +0,0 @@
|
||||
variable "base_hostname" {
|
||||
type = string
|
||||
description = "Base hostname to serve content from"
|
||||
default = "dev.homelab"
|
||||
}
|
||||
|
||||
resource "nomad_job" "traefik" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
"base_hostname" = "${var.base_hostname}",
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/traefik.nomad")
|
||||
}
|
@ -1,5 +0,0 @@
|
||||
variable "base_hostname" {
|
||||
type = string
|
||||
description = "Base hostname to serve content from"
|
||||
default = "dev.homelab"
|
||||
}
|
@ -1,40 +0,0 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/consul" {
|
||||
version = "2.15.1"
|
||||
hashes = [
|
||||
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
|
||||
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
|
||||
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
|
||||
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
|
||||
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
|
||||
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
|
||||
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
|
||||
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
|
||||
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
|
||||
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
|
||||
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
|
||||
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
|
||||
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.17"
|
||||
hashes = [
|
||||
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
|
||||
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
|
||||
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
|
||||
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
|
||||
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
|
||||
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
|
||||
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
|
||||
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
|
||||
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
|
||||
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
|
||||
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
|
||||
]
|
||||
}
|
@ -25,6 +25,7 @@ job "adminer" {
|
||||
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
# TODO: how do I get these to not bind to the host eth0 address
|
||||
local_bind_port = 4040
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,6 @@
|
||||
job "mysql-server" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
priority = 80
|
||||
|
||||
group "mysql-server" {
|
||||
count = 1
|
||||
|
@ -42,12 +42,6 @@ resource "consul_config_entry" "mysql_intents" {
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "grafana"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
job "redis" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
priority = 60
|
||||
|
||||
group "cache" {
|
||||
count = 1
|
||||
@ -53,7 +52,7 @@ job "redis" {
|
||||
|
||||
config {
|
||||
image = "redis:6"
|
||||
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
|
||||
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning"]
|
||||
ports = ["main"]
|
||||
}
|
||||
|
||||
|
@ -4,7 +4,6 @@
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.16"
|
||||
hashes = [
|
||||
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
|
||||
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
|
||||
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
|
||||
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
|
nomad/metrics/exporters.nomad (new file, 363 lines)
@ -0,0 +1,363 @@
|
||||
job "metrics" {
|
||||
datacenters = ["dc1"]
|
||||
type = "system"
|
||||
|
||||
group "cadvisor" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "cadvisor" {
|
||||
to = 8080
|
||||
}
|
||||
|
||||
port "expose" {
|
||||
}
|
||||
|
||||
port "cadvisor_envoy_metrics" {
|
||||
to = 9102
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "cadvisor"
|
||||
port = "cadvisor"
|
||||
|
||||
meta {
|
||||
metrics_addr = "${NOMAD_ADDR_expose}"
|
||||
envoy_metrics_addr = "${NOMAD_ADDR_cadvisor_envoy_metrics}"
|
||||
nomad_dc = "${NOMAD_DC}"
|
||||
nomad_node_name = "${node.unique.name}"
|
||||
}
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 8080
|
||||
|
||||
expose {
|
||||
path {
|
||||
path = "/metrics"
|
||||
protocol = "http"
|
||||
local_path_port = 8080
|
||||
listener_port = "expose"
|
||||
}
|
||||
}
|
||||
|
||||
config {
|
||||
envoy_prometheus_bind_addr = "0.0.0.0:9102"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/metrics"
|
||||
port = "cadvisor"
|
||||
interval = "10s"
|
||||
timeout = "10s"
|
||||
}
|
||||
}
|
||||
|
||||
task "cadvisor" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
# image = "iamthefij/cadvisor:0.37.5"
|
||||
image = "gcr.io/cadvisor/cadvisor:v0.39.3"
|
||||
args = ["--docker_only=true"]
|
||||
|
||||
ports = ["cadvisor"]
|
||||
|
||||
# volumes = [
|
||||
# "/:/rootfs:ro",
|
||||
# "/var/run:/var/run:rw",
|
||||
# "/sys:/sys:ro",
|
||||
# "/var/lib/docker/:/var/lib/docker:ro",
|
||||
# "/cgroup:/cgroup:ro",
|
||||
# "/etc/machine-id:/etc/machine-id:ro",
|
||||
# ]
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/"
|
||||
target = "/rootfs"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/var/run"
|
||||
target = "/var/run"
|
||||
readonly = false
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/sys"
|
||||
target = "/sys"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/var/lib/docker"
|
||||
target = "/var/lib/docker"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
# mount {
|
||||
# type = "bind"
|
||||
# source = "/cgroup"
|
||||
# target = "/cgroup"
|
||||
# readonly = true
|
||||
# }
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/etc/machine-id"
|
||||
target = "/etc/machine-id"
|
||||
readonly = true
|
||||
}
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 100
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
group "node_exporter" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "node_exporter" {
|
||||
to = 9100
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "nodeexporter"
|
||||
port = "node_exporter"
|
||||
|
||||
meta {
|
||||
metrics_addr = "${NOMAD_ADDR_node_exporter}"
|
||||
nomad_dc = "${NOMAD_DC}"
|
||||
nomad_node_name = "${node.unique.name}"
|
||||
}
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 9100
|
||||
|
||||
expose {
|
||||
path {
|
||||
path = "/metrics"
|
||||
protocol = "http"
|
||||
local_path_port = 9100
|
||||
listener_port = "node_exporter"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/metrics"
|
||||
port = "node_exporter"
|
||||
interval = "10s"
|
||||
timeout = "10s"
|
||||
}
|
||||
}
|
||||
|
||||
task "node_exporter" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "prom/node-exporter:v1.0.1"
|
||||
args = ["--path.rootfs", "/host"]
|
||||
|
||||
ports = ["node_exporter"]
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/"
|
||||
target = "/host"
|
||||
readonly = true
|
||||
}
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
group "promtail" {
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
|
||||
port "promtail" {
|
||||
to = 9080
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "promtail"
|
||||
port = "promtail"
|
||||
|
||||
meta {
|
||||
metrics_addr = "${NOMAD_ADDR_promtail}"
|
||||
nomad_dc = "${NOMAD_DC}"
|
||||
nomad_node_name = "${node.unique.name}"
|
||||
}
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 9080
|
||||
|
||||
upstreams {
|
||||
destination_name = "loki"
|
||||
local_bind_port = 1000
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/metrics"
|
||||
port = "promtail"
|
||||
interval = "10s"
|
||||
timeout = "10s"
|
||||
}
|
||||
}
|
||||
|
||||
task "promtail" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "grafana/promtail:2.2.1"
|
||||
args = ["-config.file=/etc/promtail/promtail.yml"]
|
||||
ports = ["promtail"]
|
||||
|
||||
# Mount config
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "/etc/promtail/promtail.yml"
|
||||
source = "local/promtail.yml"
|
||||
}
|
||||
|
||||
# Bind mount host machine-id and log directories
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/etc/machine-id"
|
||||
target = "/etc/machine-id"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/var/log/journal/"
|
||||
target = "/var/log/journal/"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "/run/log/journal/"
|
||||
target = "/run/log/journal/"
|
||||
readonly = true
|
||||
}
|
||||
|
||||
# mount {
|
||||
# type = "bind"
|
||||
# source = "/var/log/audit"
|
||||
# target = "/var/log/audit"
|
||||
# readonly = true
|
||||
# }
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
---
|
||||
server:
|
||||
http_listen_address: 0.0.0.0
|
||||
http_listen_port: 9080
|
||||
|
||||
clients:
|
||||
# loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
|
||||
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
|
||||
- job_name: journal
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: systemd-journal
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: unit
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: hostname
|
||||
- source_labels: ['__journal__transport']
|
||||
target_label: journal_transport
|
||||
# Docker log labels
|
||||
- source_labels: ['__journal_syslog_identifier']
|
||||
target_label: syslog_identifier
|
||||
- source_labels: ['__journal_image_name']
|
||||
target_label: docker_image_name
|
||||
- source_labels: ['__journal_container_name']
|
||||
target_label: docker_container_name
|
||||
- source_labels: ['__journal_container_id']
|
||||
target_label: docker_container_id
|
||||
- source_labels: ['__journal_com_docker_compose_project']
|
||||
target_label: docker_compose_project
|
||||
- source_labels: ['__journal_com_docker_compose_service']
|
||||
target_label: docker_compose_service
|
||||
EOF
|
||||
destination = "local/promtail.yml"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -38,11 +38,6 @@ job "grafana" {
|
||||
destination_name = "loki"
|
||||
local_bind_port = 3100
|
||||
}
|
||||
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
local_bind_port = 6060
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -67,70 +62,6 @@ job "grafana" {
|
||||
]
|
||||
}
|
||||
|
||||
task "grafana-bootstrap" {
|
||||
driver = "docker"
|
||||
|
||||
lifecycle {
|
||||
hook = "prestart"
|
||||
sidecar = false
|
||||
}
|
||||
|
||||
config {
|
||||
image = "mysql:8"
|
||||
args = [
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"/usr/bin/mysql --defaults-extra-file=/task/my.cnf < /task/bootstrap.sql",
|
||||
]
|
||||
|
||||
mount {
|
||||
type = "bind"
|
||||
source = "local/"
|
||||
target = "/task/"
|
||||
}
|
||||
}
|
||||
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
[client]
|
||||
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
|
||||
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
|
||||
user=root
|
||||
{{ with secret "kv/data/mysql" }}
|
||||
password={{ .Data.data.root_password }}
|
||||
{{ end }}
|
||||
EOF
|
||||
destination = "local/my.cnf"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
{{ with secret "kv/data/grafana" -}}
|
||||
{{ if .Data.data.db_name -}}
|
||||
CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
|
||||
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
|
||||
GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
|
||||
{{ else -}}
|
||||
SELECT 'NOOP';
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
EOF
|
||||
destination = "local/bootstrap.sql"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 50
|
||||
}
|
||||
}
|
||||
|
||||
task "grafana" {
|
||||
driver = "docker"
|
||||
|
||||
@ -158,27 +89,19 @@ SELECT 'NOOP';
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
{{ with secret "kv/data/grafana" -}}
|
||||
{{ with secret "kv/data/grafana" }}
|
||||
GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }}
|
||||
GF_SMTP_USER={{ .Data.data.smtp_user }}
|
||||
GF_SMTP_PASSWORD={{ .Data.data.smtp_password }}
|
||||
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }}
|
||||
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }}
|
||||
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }}
|
||||
{{ if .Data.data.db_name -}}
|
||||
# Database storage
|
||||
GF_DATABASE_TYPE=mysql
|
||||
GF_DATABASE_HOST={{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
|
||||
GF_DATABASE_NAME={{ .Data.data.db_name }}
|
||||
GF_DATABASE_USER={{ .Data.data.db_user }}
|
||||
GF_DATABASE_PASSWORD={{ .Data.data.db_pass }}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ with secret "kv/data/slack" -}}
|
||||
{{ end }}
|
||||
{{ with secret "kv/data/slack" }}
|
||||
SLACK_BOT_URL={{ .Data.data.bot_url }}
|
||||
SLACK_BOT_TOKEN={{ .Data.data.bot_token }}
|
||||
SLACK_HOOK_URL={{ .Data.data.hook_url }}
|
||||
{{ end -}}
|
||||
{{ end }}
|
||||
EOF
|
||||
env = true
|
||||
destination = "secrets/conf.env"
|
File diff suppressed because it is too large
@ -1,3 +1,8 @@
|
||||
variable "consul_address" {
|
||||
type = string
|
||||
description = "address of consul server for dynamic scraping"
|
||||
}
|
||||
|
||||
resource "nomad_job" "exporters" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
@ -15,6 +20,12 @@ data "consul_nodes" "all-nodes" {
|
||||
resource "nomad_job" "prometheus" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
# TODO: May not need this because we have an env variable for that
|
||||
# "consul_address" = "${var.consul_address}",
|
||||
# TODO: Should this be a list?
|
||||
"consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/prometheus.nomad")
|
@ -1,3 +1,9 @@
|
||||
variable "consul_address" {
|
||||
type = string
|
||||
description = "Full address of Consul instance to get catalog from"
|
||||
default = "http://127.0.0.1:5400"
|
||||
}
|
||||
|
||||
job "prometheus" {
|
||||
datacenters = ["dc1"]
|
||||
|
||||
@ -59,7 +65,7 @@ job "prometheus" {
|
||||
ports = ["web"]
|
||||
args = [
|
||||
"--config.file=/etc/prometheus/config/prometheus.yml",
|
||||
"--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
|
||||
"--storage.tsdb.path=/prometheus",
|
||||
"--web.listen-address=0.0.0.0:9090",
|
||||
"--web.console.libraries=/usr/share/prometheus/console_libraries",
|
||||
"--web.console.templates=/usr/share/prometheus/consoles",
|
||||
@ -85,13 +91,27 @@ scrape_configs:
|
||||
- targets:
|
||||
- 0.0.0.0:9090
|
||||
|
||||
- job_name: "nomad_server"
|
||||
metrics_path: "/v1/metrics"
|
||||
params:
|
||||
format:
|
||||
- "prometheus"
|
||||
consul_sd_configs:
|
||||
- server: "${var.consul_address}"
|
||||
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
|
||||
services:
|
||||
- "nomad"
|
||||
tags:
|
||||
- "http"
|
||||
|
||||
- job_name: "nomad_client"
|
||||
metrics_path: "/v1/metrics"
|
||||
params:
|
||||
format:
|
||||
- "prometheus"
|
||||
consul_sd_configs:
|
||||
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
|
||||
- server: "${var.consul_address}"
|
||||
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
|
||||
services:
|
||||
- "nomad-client"
|
||||
|
||||
@ -101,7 +121,8 @@ scrape_configs:
|
||||
format:
|
||||
- "prometheus"
|
||||
consul_sd_configs:
|
||||
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
|
||||
- server: "${var.consul_address}"
|
||||
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
|
||||
services:
|
||||
- "consul"
|
||||
relabel_configs:
|
||||
@ -112,7 +133,8 @@ scrape_configs:
|
||||
- job_name: "exporters"
|
||||
metrics_path: "/metrics"
|
||||
consul_sd_configs:
|
||||
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
|
||||
- server: "${var.consul_address}"
|
||||
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_consul_service]
|
||||
action: drop
|
||||
@ -134,7 +156,8 @@ scrape_configs:
|
||||
- job_name: "envoy"
|
||||
metrics_path: "/metrics"
|
||||
consul_sd_configs:
|
||||
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
|
||||
- server: "${var.consul_address}"
|
||||
# - server: "{{ env "CONSUL_HTTP_ADDR" }}"
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_consul_service]
|
||||
action: keep
|
||||
@ -160,7 +183,7 @@ scrape_configs:
|
||||
|
||||
resources {
|
||||
cpu = 100
|
||||
memory = 300
|
||||
memory = 200
|
||||
}
|
||||
}
|
||||
}
|
@ -2,28 +2,26 @@
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/consul" {
|
||||
version = "2.16.2"
|
||||
version = "2.15.0"
|
||||
hashes = [
|
||||
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
|
||||
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
|
||||
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
|
||||
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
|
||||
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
|
||||
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
|
||||
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
|
||||
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
|
||||
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
|
||||
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
|
||||
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
|
||||
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
|
||||
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
|
||||
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
|
||||
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
|
||||
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
|
||||
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
|
||||
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
|
||||
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
|
||||
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
|
||||
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
|
||||
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
|
||||
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
|
||||
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
|
||||
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.16"
|
||||
hashes = [
|
||||
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
|
||||
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
|
||||
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
|
||||
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
|
nomad/nextcloud/.terraform.lock.hcl (new file, 20 lines)
@ -0,0 +1,20 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.16"
|
||||
hashes = [
|
||||
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
|
||||
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
|
||||
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
|
||||
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
|
||||
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
|
||||
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
|
||||
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
|
||||
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
|
||||
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
|
||||
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
|
||||
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
|
||||
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
|
||||
]
|
||||
}
|
@ -24,23 +24,15 @@ locals {
|
||||
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
|
||||
}
|
||||
|
||||
# Configure the Vault provider
|
||||
provider "vault" {
|
||||
address = length(var.vault_address) == 0 ? local.vault_node_address : var.vault_address
|
||||
token = var.vault_token
|
||||
}
|
||||
|
||||
# Something that should exist in a post bootstrap module, right now module includes bootstrapping
# which requires Admin
|
||||
# data "vault_nomad_access_token" "deploy" {
|
||||
# backend = "nomad"
|
||||
# role = "deploy"
|
||||
# }
|
||||
|
||||
# Configure the Nomad provider
|
||||
provider "nomad" {
|
||||
address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
|
||||
address = local.nomad_node_address
|
||||
secret_id = var.nomad_secret_id
|
||||
# secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
|
||||
region = "global"
|
||||
region = "global"
|
||||
}
|
||||
|
||||
# Configure the Vault provider
|
||||
provider "vault" {
|
||||
address = local.vault_node_address
|
||||
token = var.vault_token
|
||||
}
|
||||
|
@ -1,45 +0,0 @@
|
||||
---
|
||||
- name: Recovery Consul
|
||||
hosts: consul_instances
|
||||
|
||||
tasks:
|
||||
- name: Stop Consul
|
||||
systemd:
|
||||
name: consul
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
- name: Get node-id
|
||||
slurp:
|
||||
src: /opt/consul/node-id
|
||||
register: consul_node_id
|
||||
become: true
|
||||
|
||||
- name: Node Id
|
||||
debug:
|
||||
msg: "node_id: {{ consul_node_id.content }}"
|
||||
|
||||
- name: Address
|
||||
debug:
|
||||
msg: "address: {{ ansible_default_ipv4.address }}"
|
||||
|
||||
- name: Save
|
||||
copy:
|
||||
dest: "/opt/consul/raft/peers.json"
|
||||
content: |
|
||||
[
|
||||
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
|
||||
{
|
||||
"id": "{{ hostvars[host].consul_node_id.content }}",
|
||||
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
|
||||
"non_voter": false
|
||||
}{% if not loop.last %},{% endif %}
|
||||
{% endfor -%}
|
||||
]
|
||||
become: true
|
||||
|
||||
- name: Restart Consul
|
||||
systemd:
|
||||
name: consul
|
||||
state: restarted
|
||||
become: true
|
@ -1,45 +0,0 @@
|
||||
---
|
||||
- name: Recovery Nomad
|
||||
hosts: nomad_instances
|
||||
|
||||
tasks:
|
||||
- name: Stop Nomad
|
||||
systemd:
|
||||
name: nomad
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
- name: Get node-id
|
||||
slurp:
|
||||
src: /var/nomad/server/node-id
|
||||
register: nomad_node_id
|
||||
become: true
|
||||
|
||||
- name: Node Id
|
||||
debug:
|
||||
msg: "node_id: {{ nomad_node_id.content }}"
|
||||
|
||||
- name: Address
|
||||
debug:
|
||||
msg: "address: {{ ansible_default_ipv4.address }}"
|
||||
|
||||
- name: Save
|
||||
copy:
|
||||
dest: /var/nomad/server/raft/peers.json
|
||||
content: |
|
||||
[
|
||||
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
|
||||
{
|
||||
"id": "{{ hostvars[host].nomad_node_id.content }}",
|
||||
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
|
||||
"non_voter": false
|
||||
}{% if not loop.last %},{% endif %}
|
||||
{% endfor -%}
|
||||
]
|
||||
become: true
|
||||
|
||||
- name: Restart Nomad
|
||||
systemd:
|
||||
name: nomad
|
||||
state: restarted
|
||||
become: true
|
nomad/redis/.terraform.lock.hcl (new file, 38 lines)
@ -0,0 +1,38 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/consul" {
|
||||
version = "2.15.0"
|
||||
hashes = [
|
||||
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
|
||||
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
|
||||
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
|
||||
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
|
||||
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
|
||||
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
|
||||
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
|
||||
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
|
||||
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
|
||||
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
|
||||
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
|
||||
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.16"
|
||||
hashes = [
|
||||
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
|
||||
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
|
||||
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
|
||||
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
|
||||
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
|
||||
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
|
||||
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
|
||||
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
|
||||
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
|
||||
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
|
||||
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
|
||||
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
|
||||
]
|
||||
}
|
@ -7,7 +7,7 @@ roles:
|
||||
- src: https://github.com/IamTheFij/ansible-nomad.git
|
||||
name: ansible-nomad
|
||||
scm: git
|
||||
version: my-main
|
||||
version: install-repo
|
||||
- src: https://github.com/ansible-community/ansible-vault.git
|
||||
name: ansible-vault
|
||||
scm: git
|
||||
|
@ -6,7 +6,6 @@
|
||||
# sticky_disk = bool
|
||||
# args = json(list[str])
|
||||
# resources = dict(cpu = int, mem = int)
|
||||
# env = json(dict(str: any))
|
||||
# templates = json(list(dict(
|
||||
# data = str,
|
||||
# dest = str,
|
||||
@ -15,19 +14,9 @@
|
||||
# left_delimiter = str,
|
||||
# right_delimiter = str,
|
||||
# )))
|
||||
# host_volumes = json(list(dict(
|
||||
# name = str,
|
||||
# dest = str,
|
||||
# read_only = bool,
|
||||
# )))
|
||||
# healthcheck = "/"
|
||||
# upstreams = json(list(dict(
|
||||
# destination_name = str,
|
||||
# local_bind_port = int
|
||||
# )))
|
||||
# mysql = bool
|
||||
# redis = bool
|
||||
# vault = bool
|
||||
job "[[.name]]" {
|
||||
region = "global"
|
||||
datacenters = ["dc1"]
|
||||
@ -35,17 +24,17 @@ job "[[.name]]" {
|
||||
type = "service"
|
||||
|
||||
group "[[.name]]" {
|
||||
[[ with .count ]]count = [[ . ]][[ end ]]
|
||||
[[ with .count ]]count = [[ . ]][[end]]
|
||||
network {
|
||||
mode = "bridge"
|
||||
[[ if not (empty .service_port) -]]
|
||||
[[ if not (empty .service_port) ]]
|
||||
port "main" {
|
||||
[[ if default false .ingress -]]
|
||||
[[ if default false .ingress ]]
|
||||
host_network = "loopback"
|
||||
[[ end -]]
|
||||
to = [[ .service_port ]]
|
||||
[[ end ]]
|
||||
to = [[.service_port]]
|
||||
}
|
||||
[[ end -]]
|
||||
[[ end ]]
|
||||
}
|
||||
|
||||
[[ if default false .sticky_disk ]]
|
||||
@ -55,16 +44,6 @@ job "[[.name]]" {
|
||||
}
|
||||
[[ end ]]
|
||||
|
||||
[[ with .host_volumes -]]
|
||||
[[ range $v := . | parseJSON -]]
|
||||
volume "[[ $v.name ]]" {
|
||||
type = "host"
|
||||
read_only = [[ $v.read_only ]]
|
||||
source = "[[ $v.name ]]"
|
||||
}
|
||||
[[ end ]]
|
||||
[[ end -]]
|
||||
|
||||
[[ if not (empty .service_port) ]]
|
||||
service {
|
||||
name = "[[.name | replace "_" "-"]]"
|
||||
@ -74,27 +53,19 @@ job "[[.name]]" {
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = [[ .service_port ]]
|
||||
[[ if default false .mysql -]]
|
||||
local_service_port = [[.service_port]]
|
||||
[[ if default false .mysql ]]
|
||||
upstreams {
|
||||
destination_name = "mysql-server"
|
||||
local_bind_port = 4040
|
||||
}
|
||||
[[ end -]]
|
||||
[[ if default false .redis -]]
|
||||
[[ if default false .redis ]]
|
||||
upstreams {
|
||||
destination_name = "redis"
|
||||
local_bind_port = 6379
|
||||
}
|
||||
[[ end -]]
|
||||
[[ with .upstreams -]]
|
||||
[[range $u := . | parseJSON -]]
|
||||
upstreams {
|
||||
destination_name = "[[ $u.destination_name ]]"
|
||||
local_bind_port = [[ $u.local_bind_port ]]
|
||||
}
|
||||
[[ end ]]
|
||||
[[ end -]]
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,13 +93,10 @@ job "[[.name]]" {
|
||||
[[ if default false .ingress -]]
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.[[.name]].entryPoints=websecure",
|
||||
[[ if not (empty .ingress_rule) -]]
|
||||
"traefik.http.routers.[[.name]].rule=[[.ingress_rule]]",
|
||||
[[ end -]]
|
||||
[[ end -]]
|
||||
]
|
||||
}
|
||||
[[ end -]]
|
||||
[[ end ]]
|
||||
|
||||
task "[[.name]]" {
|
||||
driver = "docker"
|
||||
@ -137,51 +105,32 @@ job "[[.name]]" {
|
||||
image = "[[.image]]"
|
||||
[[ if not (empty .service_port) -]]
|
||||
ports = ["main"]
|
||||
[[ end -]]
|
||||
[[- end ]]
|
||||
[[ if not (empty .args) -]]
|
||||
args = ["[[ .args | parseJSON | join `", "` ]]"]
|
||||
[[ end -]]
|
||||
[[- end ]]
|
||||
|
||||
[[ with .templates -]]
|
||||
[[ range $t := . | parseJSON -]]
|
||||
[[ with .templates]]
|
||||
[[ range $t := . | parseJSON ]]
|
||||
mount {
|
||||
type = "bind"
|
||||
target = "[[ $t.dest ]]"
|
||||
source = "local/[[ $t.dest ]]"
|
||||
}
|
||||
[[ end ]]
|
||||
[[ end -]]
|
||||
[[ end ]]
|
||||
}
|
||||
|
||||
[[ if default false .vault -]]
|
||||
vault {
|
||||
policies = [
|
||||
"access-tables",
|
||||
"nomad-task",
|
||||
]
|
||||
}
|
||||
[[ end -]]
|
||||
|
||||
[[ with .env -]]
|
||||
env = {
|
||||
[[ range $k, $v := . | parseJSON -]]
|
||||
[[- range $k, $v := . ]]
|
||||
"[[$k]]" = "[[$v]]"
|
||||
[[ end -]]
|
||||
}
|
||||
[[ end -]]
|
||||
|
||||
[[ with .host_volumes -]]
|
||||
[[ range $v := . | parseJSON -]]
|
||||
volume_mount {
|
||||
volume = "[[ $v.name ]]"
|
||||
destination = "[[ $v.dest ]]"
|
||||
read_only = [[ $v.read_only ]]
|
||||
[[- end ]]
|
||||
}
|
||||
[[ end ]]
|
||||
[[ end -]]
|
||||
|
||||
[[ with .templates -]]
|
||||
[[ range $t := . | parseJSON -]]
|
||||
[[ with .templates ]]
|
||||
[[ range $t := . | parseJSON ]]
|
||||
template {
|
||||
data = <<EOF
|
||||
[[ $t.data ]]
|
||||
@ -193,15 +142,15 @@ EOF
|
||||
[[ with $t.change_signal ]]change_signal = "[[ . ]]"[[ end -]]
|
||||
[[ with $t.env ]]env = [[ . ]][[ end ]]
|
||||
}
|
||||
[[ end -]]
|
||||
[[ end -]]
|
||||
[[ end ]]
|
||||
[[ end ]]
|
||||
|
||||
[[ with .resources -]]
|
||||
[[ with .resources ]]
|
||||
resources {
|
||||
cpu = [[ .cpu ]]
|
||||
memory = [[ .memory ]]
|
||||
}
|
||||
[[ end -]]
|
||||
[[ end ]]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,26 @@
|
||||
module "services" {
|
||||
source = "./services"
|
||||
module "nextcloud" {
|
||||
source = "./nextcloud"
|
||||
|
||||
depends_on = [module.databases, module.core]
|
||||
depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "backups" {
|
||||
source = "./backups"
|
||||
|
||||
depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "media" {
|
||||
source = "./media"
|
||||
}
|
||||
|
||||
resource "nomad_job" "whoami" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
"count" = "${2 * length(data.consul_service.nomad.service)}",
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/whoami.nomad")
|
||||
}
|
||||
|
@ -1,40 +0,0 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/consul" {
|
||||
version = "2.16.2"
|
||||
hashes = [
|
||||
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
|
||||
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
|
||||
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
|
||||
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
|
||||
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
|
||||
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
|
||||
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
|
||||
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
|
||||
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
|
||||
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
|
||||
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
|
||||
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
|
||||
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/nomad" {
|
||||
version = "1.4.19"
|
||||
hashes = [
|
||||
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
|
||||
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
|
||||
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
|
||||
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
|
||||
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
|
||||
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
|
||||
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
|
||||
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
|
||||
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
|
||||
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
|
||||
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
|
||||
]
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
resource "nomad_job" "backups" {
|
||||
jobspec = templatefile("${path.module}/backup.nomad", {
|
||||
module_path = "${path.module}",
|
||||
batch_node = null,
|
||||
})
|
||||
}
|
||||
|
||||
# Get Nomad clients from Consul
|
||||
# data "consul_service" "nomad" {
|
||||
# name = "nomad-client"
|
||||
# }
|
||||
|
||||
resource "nomad_job" "backups-oneoff" {
|
||||
# TODO: Get list of nomad hosts dynamically
|
||||
for_each = toset(["n1", "n2"])
|
||||
# for_each = toset([
|
||||
# for node in data.consul_service.nomad.service :
|
||||
# node.node_name
|
||||
# ])
|
||||
jobspec = templatefile("${path.module}/backup.nomad", {
|
||||
module_path = "${path.module}",
|
||||
batch_node = each.key,
|
||||
})
|
||||
}
|
@ -1,27 +0,0 @@
|
||||
job "lldap" {
|
||||
schedule = "@daily"
|
||||
|
||||
config {
|
||||
repo = "rclone::ftp,env_auth:/nomad/lldap"
|
||||
passphrase = env("BACKUP_PASSPHRASE")
|
||||
}
|
||||
|
||||
# sqlite "Backup database" {
|
||||
# path = "/data/lldap/users.db"
|
||||
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
|
||||
# dump_to = "/data/lldap/users.db.bak"
|
||||
# }
|
||||
|
||||
backup {
|
||||
paths = ["/data/lldap"]
|
||||
# Because path is absolute
|
||||
restore_opts {
|
||||
Target = "/"
|
||||
}
|
||||
}
|
||||
|
||||
forget {
|
||||
KeepLast = 2
|
||||
Prune = true
|
||||
}
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
job "nzbget" {
|
||||
schedule = "@daily"
|
||||
|
||||
config {
|
||||
repo = "rclone::ftp,env_auth:/nomad/nzbget"
|
||||
passphrase = env("BACKUP_PASSPHRASE")
|
||||
}
|
||||
|
||||
backup {
|
||||
paths = ["/data/nzbget"]
|
||||
# Because path is absolute
|
||||
restore_opts {
|
||||
Target = "/"
|
||||
}
|
||||
}
|
||||
|
||||
forget {
|
||||
KeepLast = 2
|
||||
Prune = true
|
||||
}
|
||||
}
|
@ -1,27 +0,0 @@
|
||||
job "sonarr" {
|
||||
schedule = "@daily"
|
||||
|
||||
config {
|
||||
repo = "rclone::ftp,env_auth:/nomad/sonarr"
|
||||
passphrase = env("BACKUP_PASSPHRASE")
|
||||
}
|
||||
|
||||
# sqlite "Backup database" {
|
||||
# path = "/data/lldap/users.db"
|
||||
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
|
||||
# dump_to = "/data/lldap/users.db.bak"
|
||||
# }
|
||||
|
||||
backup {
|
||||
paths = ["/data/sonarr"]
|
||||
# Because path is absolute
|
||||
restore_opts {
|
||||
Target = "/"
|
||||
}
|
||||
}
|
||||
|
||||
forget {
|
||||
KeepLast = 2
|
||||
Prune = true
|
||||
}
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
resource "consul_service" "homeassistant" {
|
||||
name = "hass"
|
||||
node = consul_node.homeassistant.name
|
||||
port = 8123
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.consulcatalog.connect=false",
|
||||
"traefik.http.routers.hass.entryPoints=websecure",
|
||||
]
|
||||
|
||||
check {
|
||||
check_id = "homeassistant:hass"
|
||||
status = "passing"
|
||||
name = "Home Assistant Health Check"
|
||||
http = "192.168.3.65:8123"
|
||||
interval = "30s"
|
||||
timeout = "10s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "consul_node" "homeassistant" {
|
||||
name = "homeassistant"
|
||||
address = "192.168.3.65"
|
||||
|
||||
meta = {
|
||||
"external-node" = "true"
|
||||
"external-probe" = "true"
|
||||
}
|
||||
}
|
@ -1,201 +0,0 @@
|
||||
job "ipdvr" {
|
||||
region = "global"
|
||||
datacenters = ["dc1"]
|
||||
|
||||
type = "service"
|
||||
|
||||
group "nzbget" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "main" {
|
||||
host_network = "loopback"
|
||||
to = 6789
|
||||
}
|
||||
}
|
||||
|
||||
volume "nzbget-data" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "nzbget-data"
|
||||
}
|
||||
|
||||
volume "download" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "download"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "nzbget"
|
||||
port = "main"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 6789
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
memory_max = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# check {
|
||||
# type = "http"
|
||||
# path = "/"
|
||||
# port = "main"
|
||||
# interval = "10s"
|
||||
# timeout = "10s"
|
||||
# }
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.nzbget.entryPoints=websecure",
|
||||
]
|
||||
}
|
||||
|
||||
task "nzbget" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "linuxserver/nzbget"
|
||||
ports = ["main"]
|
||||
}
|
||||
|
||||
env = {
|
||||
"PGID" = 100
|
||||
"PUID" = 1001
|
||||
"TZ" = "America/Los_Angeles"
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "nzbget-data"
|
||||
destination = "/config"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "download"
|
||||
destination = "/downloads"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 200
|
||||
memory = 200
|
||||
memory_max = 500
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
group "sonarr" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "main" {
|
||||
host_network = "loopback"
|
||||
to = 8989
|
||||
}
|
||||
}
|
||||
|
||||
volume "sonarr-data" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "sonarr-data"
|
||||
}
|
||||
|
||||
volume "tv-sonarr" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "tv-sonarr"
|
||||
}
|
||||
|
||||
volume "download" {
|
||||
type = "host"
|
||||
read_only = false
|
||||
source = "download"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "sonarr"
|
||||
port = "main"
|
||||
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
local_service_port = 8989
|
||||
upstreams {
|
||||
destination_name = "nzbget"
|
||||
local_bind_port = 6789
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sidecar_task {
|
||||
resources {
|
||||
cpu = 50
|
||||
memory = 20
|
||||
memory_max = 50
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# check {
|
||||
# type = "http"
|
||||
# path = "/"
|
||||
# port = "main"
|
||||
# interval = "10s"
|
||||
# timeout = "10s"
|
||||
# }
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.sonarr.entryPoints=websecure",
|
||||
]
|
||||
}
|
||||
|
||||
task "sonarr" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "linuxserver/sonarr"
|
||||
ports = ["main"]
|
||||
}
|
||||
|
||||
env = {
|
||||
"PGID" = 100
|
||||
"PUID" = 1001
|
||||
"TZ" = "America/Los_Angeles"
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "sonarr-data"
|
||||
destination = "/config"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "tv-sonarr"
|
||||
destination = "/tv"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "download"
|
||||
destination = "/downloads"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 100
|
||||
memory = 300
|
||||
memory_max = 500
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,109 +0,0 @@
|
||||
# module "nextcloud" {
|
||||
# source = "./nextcloud"
|
||||
#
|
||||
# depends_on = [module.databases]
|
||||
# }
|
||||
|
||||
module "backups" {
|
||||
source = "./backups"
|
||||
|
||||
# In parent module
|
||||
# depends_on = [module.databases]
|
||||
}
|
||||
|
||||
module "media" {
|
||||
source = "./media"
|
||||
}
|
||||
|
||||
resource "nomad_job" "whoami" {
|
||||
hcl2 {
|
||||
enabled = true
|
||||
vars = {
|
||||
"count" = 1,
|
||||
# "count" = "${2 * length(data.consul_service.nomad.service)}",
|
||||
}
|
||||
}
|
||||
|
||||
jobspec = file("${path.module}/whoami.nomad")
|
||||
}
|
||||
|
||||
resource "nomad_job" "ipdvr" {
|
||||
jobspec = file("${path.module}/ip-dvr.nomad")
|
||||
}
|
||||
|
||||
resource "consul_config_entry" "nzbget_intents" {
|
||||
depends_on = [nomad_job.ipdvr]
|
||||
|
||||
name = "nzbget"
|
||||
kind = "service-intentions"
|
||||
|
||||
config_json = jsonencode({
|
||||
Sources = [
|
||||
{
|
||||
Action = "allow"
|
||||
Name = "sonarr"
|
||||
Precedence = 9
|
||||
Type = "consul"
|
||||
},
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
# module "nzbget" {
|
||||
# source "./levant"
|
||||
#
|
||||
# template_path = "service.nomad"
|
||||
# variables = {
|
||||
# name = "nzbget"
|
||||
# image = "linuxserver/nzbget"
|
||||
# service_port = 6789
|
||||
# ingress = true
|
||||
# env = jsonencode({
|
||||
# PGID = 100
|
||||
# PUID = 1001
|
||||
# TZ = "America/Los_Angeles"
|
||||
# })
|
||||
# host_volumes = jsonencode([
|
||||
# {
|
||||
# name = "download"
|
||||
# dest = "/srv/volumes/download"
|
||||
# read_only = false
|
||||
# },
|
||||
# ])
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# module "sonarr" {
|
||||
# source = "./levant"
|
||||
#
|
||||
# template_path = "service.nomad"
|
||||
# variables = {
|
||||
# name = "sonarr"
|
||||
# image = "linuxserver/sonarr"
|
||||
# service_port = 8989
|
||||
# ingress = true
|
||||
# env = jsonencode({
|
||||
# PGID = 100
|
||||
# PUID = 1001
|
||||
# TZ = "America/Los_Angeles"
|
||||
#
|
||||
# })
|
||||
# host_volumes = jsonencode([
|
||||
# {
|
||||
# name = "sonarr-data"
|
||||
# dest = "/config"
|
||||
# read_only = false
|
||||
# },
|
||||
# {
|
||||
# name = "tv-sonarr"
|
||||
# dest = "/srv/volumes/media-write/TV Shows"
|
||||
# read_only = false
|
||||
# },
|
||||
# {
|
||||
# name = "download"
|
||||
# dest = "/srv/volumes/download"
|
||||
# read_only = false
|
||||
# },
|
||||
# ])
|
||||
# }
|
||||
# }
|
@ -9,29 +9,29 @@
|
||||
roles:
|
||||
- role: ansible-consul
|
||||
vars:
|
||||
consul_version: "1.13.3-1"
|
||||
consul_version: "1.12.3-1"
|
||||
consul_install_upgrade: true
|
||||
consul_install_from_repo: true
|
||||
consul_os_repo_prerequisites: []
|
||||
|
||||
consul_node_role: server
|
||||
consul_bootstrap_expect: true
|
||||
consul_bootstrap_expect_value: "{{ [(play_hosts | length), 3] | min }}"
|
||||
|
||||
consul_user: consul
|
||||
consul_manage_user: true
|
||||
consul_group: bin
|
||||
consul_manage_group: true
|
||||
|
||||
consul_architecture_map:
|
||||
x86_64: amd64
|
||||
armhfv6: arm
|
||||
armv7l: arm
|
||||
|
||||
# consul_tls_enable: true
|
||||
consul_connect_enabled: true
|
||||
consul_ports_grpc: 8502
|
||||
consul_client_address: "0.0.0.0"
|
||||
|
||||
# Autopilot
|
||||
consul_autopilot_enable: true
|
||||
consul_autopilot_cleanup_dead_Servers: true
|
||||
|
||||
# Enable metrics
|
||||
consul_config_custom:
|
||||
telemetry:
|
||||
@ -60,12 +60,29 @@
|
||||
|
||||
# If DNS is broken after dnsmasq, then need to set /etc/resolv.conf to something
|
||||
# pointing to 127.0.0.1 and possibly restart Docker and Nomad
|
||||
- name: Update resolv.conf
|
||||
lineinfile:
|
||||
dest: /etc/resolv.conf
|
||||
create: true
|
||||
line: "nameserver 127.0.0.1"
|
||||
become: true
|
||||
|
||||
- name: Add values
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
block:
|
||||
- name: Install python-consul
|
||||
pip:
|
||||
name: python-consul
|
||||
extra_args: --index-url https://pypi.org/simple
|
||||
|
||||
- name: Set hostname
|
||||
consul_kv:
|
||||
host: "{{ inventory_hostname }}"
|
||||
key: global/base_hostname
|
||||
# TODO: propagate this through via Consul and Nomad templates rather than Terraform
|
||||
value: dev.homelab
|
||||
|
||||
- name: Write values
|
||||
consul_kv:
|
||||
host: "{{ inventory_hostname }}"
|
||||
key: "{{ item.key }}"
|
||||
value: "{{ item.value }}"
|
||||
loop: "{{ consul_values | default({}) | dict2items }}"
|
||||
|
||||
- name: Setup Vault cluster
|
||||
hosts: vault_instances
|
||||
@ -76,7 +93,7 @@
|
||||
roles:
|
||||
- name: ansible-vault
|
||||
vars:
|
||||
vault_version: 1.12.0-1
|
||||
vault_version: 1.10.0
|
||||
vault_install_hashi_repo: true
|
||||
vault_harden_file_perms: true
|
||||
vault_bin_path: /usr/bin
|
||||
@ -93,6 +110,7 @@
|
||||
status_code: 200, 429, 472, 473, 501, 503
|
||||
body_format: json
|
||||
return_content: true
|
||||
run_once: true
|
||||
register: vault_status
|
||||
|
||||
- name: Initialize Vault
|
||||
@ -145,24 +163,27 @@
|
||||
- unseal_keys_hex is defined
|
||||
- vault_status.json["sealed"]
|
||||
|
||||
- name: Install Docker
|
||||
hosts: nomad_instances
|
||||
become: true
|
||||
vars:
|
||||
docker_architecture_map:
|
||||
x86_64: amd64
|
||||
armv7l: armhf
|
||||
aarch64: arm64
|
||||
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
|
||||
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
|
||||
roles:
|
||||
- geerlingguy.docker
|
||||
- name: Bootstrap Vault secrets
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
block:
|
||||
- name: Install hvac
|
||||
pip:
|
||||
name: hvac
|
||||
extra_args: --index-url https://pypi.org/simple
|
||||
|
||||
tasks:
|
||||
- name: Remove snapd
|
||||
package:
|
||||
name: snapd
|
||||
state: absent
|
||||
# TODO: This fails on first run because `root_token` isn't found
|
||||
# Fails after that too because the kv/ space has not been created yet either! Oh noes!
|
||||
# Maybe move data bootstrapping to after the cluster is bootstrapped
|
||||
- name: Write values
|
||||
community.hashi_vault.vault_write:
|
||||
url: "http://{{ inventory_hostname }}:8200"
|
||||
token: "{{ root_token }}"
|
||||
path: "kv/data/{{ item.key }}"
|
||||
data:
|
||||
data:
|
||||
"{{ item.value }}"
|
||||
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
|
||||
|
||||
# Not on Ubuntu 20.04
|
||||
# - name: Install Podman
|
||||
@ -201,29 +222,15 @@
|
||||
state: mounted
|
||||
fstype: nfs4
|
||||
|
||||
- name: Create Media Library RW NFS mount
|
||||
ansible.posix.mount:
|
||||
src: 192.168.2.10:/Multimedia
|
||||
path: /srv/volumes/media-write
|
||||
opts: proto=tcp,port=2049,rw
|
||||
state: mounted
|
||||
fstype: nfs4
|
||||
|
||||
- name: Create Download RW NFS mount
|
||||
ansible.posix.mount:
|
||||
src: 192.168.2.10:/Download
|
||||
path: /srv/volumes/download
|
||||
opts: proto=tcp,port=2049,rw
|
||||
state: mounted
|
||||
fstype: nfs4
|
||||
|
||||
- name: Create Container NAS RW NFS mount
|
||||
ansible.posix.mount:
|
||||
src: 192.168.2.10:/Container
|
||||
path: /srv/volumes/container
|
||||
opts: proto=tcp,port=2049,rw
|
||||
state: mounted
|
||||
fstype: nfs4
|
||||
- name: Install Docker
|
||||
hosts: nomad_instances
|
||||
become: true
|
||||
vars:
|
||||
deb_arch: "{% if ansible_architecture == 'x86_64' %}amd64{% elif ansible_architecture == 'armv7l' %}armhf{% endif %}"
|
||||
docker_apt_arch: "{{ deb_arch }}"
|
||||
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
|
||||
roles:
|
||||
- geerlingguy.docker
|
||||
|
||||
- name: Build Nomad cluster
|
||||
hosts: nomad_instances
|
||||
@ -235,60 +242,45 @@
|
||||
- name: motioneye-recordings
|
||||
path: /srv/volumes/motioneye-recordings
|
||||
owner: "root"
|
||||
group: "root"
|
||||
group: "bin"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: media-read
|
||||
path: /srv/volumes/media-write
|
||||
read_only: true
|
||||
- name: media-write
|
||||
path: /srv/volumes/media-write
|
||||
path: /srv/volumes/media-read
|
||||
owner: "root"
|
||||
group: "root"
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: tv-sonarr
|
||||
path: "/srv/volumes/media-write/TV Shows"
|
||||
owner: 1001
|
||||
group: 100
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: download
|
||||
path: /srv/volumes/download
|
||||
owner: 1001
|
||||
group: 100
|
||||
mode: "0755"
|
||||
read_only: false
|
||||
- name: nzbget-data
|
||||
path: /srv/volumes/container/nzbget/config
|
||||
read_only: false
|
||||
- name: gitea-data
|
||||
path: /srv/volumes/container/gitea
|
||||
read_only: false
|
||||
mode: "0777"
|
||||
read_only: true
|
||||
- name: all-volumes
|
||||
path: /srv/volumes
|
||||
owner: "root"
|
||||
group: "root"
|
||||
mode: "0755"
|
||||
mode: "0777"
|
||||
read_only: false
|
||||
|
||||
roles:
|
||||
- name: ansible-nomad
|
||||
vars:
|
||||
nomad_version: "1.4.1-1"
|
||||
nomad_version: "1.3.2-1"
|
||||
nomad_install_remotely: true
|
||||
nomad_install_upgrade: true
|
||||
nomad_allow_purge_config: true
|
||||
|
||||
nomad_meta:
|
||||
# There are issues with v1.23.0 on arm64
|
||||
connect.sidecar_image: envoyproxy/envoy:v1.23.1
|
||||
|
||||
# Where nomad gets installed to
|
||||
nomad_bin_dir: /usr/bin
|
||||
nomad_install_from_repo: true
|
||||
|
||||
nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
|
||||
nomad_raft_protocol: 3
|
||||
# nomad_user: root
|
||||
# nomad_manage_user: true
|
||||
# nomad_group: bin
|
||||
# nomad_manage_group: true
|
||||
|
||||
# Properly map install arch
|
||||
nomad_architecture_map:
|
||||
x86_64: amd64
|
||||
armhfv6: arm
|
||||
armv7l: arm
|
||||
|
||||
nomad_autopilot: true
|
||||
nomad_encrypt_enable: true
|
||||
# nomad_use_consul: true
|
||||
@ -334,6 +326,9 @@
|
||||
|
||||
# Create networks for binding task ports
|
||||
nomad_host_networks:
|
||||
# - name: public
|
||||
# interface: eth0
|
||||
# reserved_ports: "22"
|
||||
- name: nomad-bridge
|
||||
interface: nomad
|
||||
reserved_ports: "22"
|
||||
@ -345,19 +340,11 @@
|
||||
nomad_acl_enabled: true
|
||||
|
||||
# Enable vault integration
|
||||
# HACK: Only talk to local Vault for now because it doesn't have HTTPS
|
||||
# TODO: Would be really great to have this over https and point to vault.consul.service
|
||||
# nomad_vault_address: "https://vault.service.consul:8200"
|
||||
# Right now, each node only talks to its local Vault, so if that node is rebooted and
|
||||
# that vault is sealed, it will not have access to vault. This is a problem if a node
|
||||
# must reboot.
|
||||
nomad_vault_address: "http://127.0.0.1:8200"
|
||||
# TODO: This fails on first run because the Nomad-Vault integration can't be set up
|
||||
# until Nomad has started. Could maybe figure out if ACLs have been set up and leave
|
||||
# these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
|
||||
# before Nomad is set up
|
||||
# these out until the later play
|
||||
nomad_vault_address: "http://vault.service.consul:8200"
|
||||
nomad_vault_create_from_role: "nomad-cluster"
|
||||
# TODO: (security) Probably want to restict this to a narrower scoped token
|
||||
nomad_vault_enabled: "{{ root_token is defined }}"
|
||||
nomad_vault_token: "{{ root_token | default('') }}"
|
||||
|
||||
@ -365,36 +352,26 @@
    ui:
      enabled: true
      consul:
        ui_url: "https://{{ ansible_hostname }}:8500/ui"
        ui_url: "http://{{ ansible_hostname }}:8500/ui"
      vault:
        ui_url: "https://{{ ansible_hostname }}:8200/ui"
        ui_url: "http://{{ ansible_hostname }}:8200/ui"
    consul:
      tags:
        - "traefik.enable=true"
        - "traefik.consulcatalog.connect=true"
        - "traefik.http.routers.nomadclient.entrypoints=websecure"

- name: Bootstrap Nomad ACLs and scheduler
  hosts: nomad_instances

  tasks:
    - name: Start Nomad
      systemd:
        state: started
        name: nomad

    - name: Nomad API reachable?
      uri:
        url: "http://127.0.0.1:4646/v1/status/leader"
        method: GET
        status_code: 200
      register: nomad_check_result
      retries: 6
      until: nomad_check_result is succeeded
      delay: 10
      changed_when: false
      run_once: true
- name: Bootstrap Nomad ACLs
  hosts: nomad_instances

  tasks:
    # Need to wait until nomad is running
    - name: Bootstrap ACLs
      command:
        argv:
@ -414,6 +391,16 @@
      delegate_to: localhost
      run_once: true

    - name: Look for policy
      command:
        argv:
          - nomad
          - acl
          - policy
          - list
      run_once: true
      register: policies

    - name: Read secret
      command:
        argv:
@ -427,35 +414,9 @@
      changed_when: false
      register: read_secretid

    - name: Enable service scheduler preemption
      command:
        argv:
          - nomad
          - operator
          - scheduler
          - set-config
          - -preempt-system-scheduler=true
          - -preempt-service-scheduler=true
      environment:
        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
      delegate_to: "{{ play_hosts[0] }}"
      run_once: true

    - name: Look for policy
      command:
        argv:
          - nomad
          - acl
          - policy
          - list
      environment:
        NOMAD_TOKEN: "{{ read_secretid.stdout }}"
      run_once: true
      register: policies

    - name: Copy policy
      copy:
        src: ./acls/nomad-anon-policy.hcl
        src: ./acls/nomad-anon-bootstrap.hcl
        dest: /tmp/anonymous.policy.hcl
      delegate_to: "{{ play_hosts[0] }}"
      register: anon_policy
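
The policy file swapped in here is not included in the diff, and the apply step in the next hunk changes its description from "Anon read only" to "Anon RW". A plausible sketch of what acls/nomad-anon-bootstrap.hcl might contain, using standard Nomad ACL policy syntax (the exact rules are an assumption):

    # Hypothetical contents of acls/nomad-anon-bootstrap.hcl
    namespace "default" {
      policy = "write"
    }

    agent {
      policy = "read"
    }

    node {
      policy = "read"
    }
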
@ -468,7 +429,7 @@
          - acl
          - policy
          - apply
          - -description="Anon read only"
          - -description="Anon RW"
          - anonymous
          - /tmp/anonymous.policy.hcl
      environment:
@ -487,13 +448,3 @@
        nomad_secret_id: "{{ read_secretid.stdout }}"
      delegate_to: localhost
      run_once: true
      notify:
        - Restart Nomad

  handlers:
    - name: Restart Nomad
      systemd:
        state: restarted
        name: nomad
      retries: 6
      delay: 5

@ -92,10 +92,17 @@ EOF
  group "syslogng" {
    count = 1

    constraint {
      attribute = "${node.unique.name}"
      # Needs to be on a predictable node for routing
      # Maybe a load balancer could be used for routing from any node
      value = "n2"
    }

    network {
      mode = "bridge"
      port "main" {
        to     = 514
        static = 1514
      }
    }

@ -106,8 +113,6 @@ EOF
      connect {
        sidecar_service {
          proxy {
            local_service_port = 514

            upstreams {
              destination_name = "syslogng-promtail"
              local_bind_port  = 1000
@ -142,12 +147,11 @@ EOF

    template {
      data = <<EOF
@version: 3.37
@include "scl.conf"
@version: 3.22

source s_network {
  default-network-drivers(
  );
source s_external {
  syslog(ip(0.0.0.0) port(1514) transport("tcp"));
  syslog(ip(0.0.0.0) port(1514) transport("udp"));
};

source s_internal {
@ -160,7 +164,7 @@ destination d_loki {
};

log { source(s_internal); destination(d_loki); };
log { source(s_network); destination(d_loki); };
log { source(s_external); destination(d_loki); };
EOF
      destination = "local/syslog-ng.conf"
    }
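
Since the listener is pinned to n2 and exposed on static port 1514, a quick smoke test from another host is possible with util-linux logger; the target hostname here is an assumption based on the node constraint and the .thefij domain used elsewhere in the repo:

    logger --server n2.thefij --port 1514 --tcp "syslog-ng connectivity test"
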
38
nomad/traefik/.terraform.lock.hcl
Normal file
@ -0,0 +1,38 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/consul" {
  version = "2.15.0"
  hashes = [
    "h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
    "zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
    "zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
    "zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
    "zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
    "zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
    "zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
    "zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
    "zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
    "zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
    "zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
    "zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
  ]
}

provider "registry.terraform.io/hashicorp/nomad" {
  version = "1.4.16"
  hashes = [
    "h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
  ]
}
192
nomad/traefik/traefik.nomad
Normal file
@ -0,0 +1,192 @@
variable "consul_address" {
  type        = string
  description = "Full address of Consul instance to get catalog from"
  default     = "http://127.0.0.1:5400"
}

variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}

job "traefik" {
  datacenters = ["dc1"]
  type        = "system"
  priority    = 100

  constraint {
    attribute = "${node.class}"
    value     = "ingress"
  }

  update {
    max_parallel = 1
    auto_revert  = true
  }

  group "traefik" {

    network {
      port "web" {
        static = 80
      }
      port "websecure" {
        static = 443
      }
    }

    service {
      name = "traefik"
      port = "web"

      check {
        type     = "http"
        path     = "/ping"
        port     = "web"
        interval = "10s"
        timeout  = "2s"
      }

      connect {
        native = true
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.traefik_dashboard.entryPoints=websecure",
        "traefik.http.routers.traefik_dashboard.rule=Host(`traefik.${var.base_hostname}`)",
        "traefik.http.routers.traefik_dashboard.service=api@internal",
        "traefik.http.routers.traefik_dashboard.tls=true",
      ]
    }

    task "traefik" {
      driver = "docker"

      config {
        image = "traefik:2.6"

        ports        = ["web", "websecure"]
        network_mode = "host"

        mount {
          type   = "bind"
          target = "/etc/traefik"
          source = "config"
        }
      }

      template {
        # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
        left_delimiter  = "<<"
        right_delimiter = ">>"
        data = <<EOH
[log]
  level = "DEBUG"

[entryPoints]
  [entryPoints.web]
    address = ":80"
    [entryPoints.web.http]
      [entryPoints.web.http.redirections]
        [entryPoints.web.http.redirections.entrypoint]
          to = "websecure"
          scheme = "https"

  [entryPoints.websecure]
    address = ":443"
    [entryPoints.websecure.http.tls]
      # certResolver = "letsEncrypt"

  [entryPoints.metrics]
    address = ":8989"

[api]
  dashboard = true

[ping]
  entrypoint = "web"

[metrics]
  [metrics.prometheus]
    entrypoint = "metrics"
    # manualRouting = true

[providers.file]
  directory = "/etc/traefik/conf"
  watch = true

[providers.consulCatalog]
  connectAware = true
  connectByDefault = true
  exposedByDefault = false
  defaultRule = "Host(`{{normalize .Name}}.${var.base_hostname}`)"
  [providers.consulCatalog.endpoint]
    address = "http://<< env "CONSUL_HTTP_ADDR" >>"
EOH
        destination = "/config/traefik.toml"
      }

      template {
        # Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
        left_delimiter  = "<<"
        right_delimiter = ">>"
        data = <<EOH
[http]
  [http.routers]
    [http.routers.nomad]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "nomad"
      rule = "Host(`nomad.${var.base_hostname}`)"
    [http.routers.consul]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "consul"
      rule = "Host(`consul.${var.base_hostname}`)"
    [http.routers.vault]
      entryPoints = ["websecure"]
      # middlewares = []
      service = "vault"
      rule = "Host(`vault.${var.base_hostname}`)"

  [http.services]
    << with service "nomad-client" ->>
    [http.services.nomad]
      [http.services.nomad.loadBalancer]
      << range . ->>
        [[http.services.nomad.loadBalancer.servers]]
          url = "http://<< .Address >>:<< .Port >>"
      << end >>
    <<- end >>
    << with service "consul" ->>
    [http.services.consul]
      [http.services.consul.loadBalancer]
      << range . ->>
        [[http.services.consul.loadBalancer.servers]]
          # Not using .Port because that's an RPC port
          url = "http://<< .Address >>:8500"
      << end >>
    <<- end >>
    << with service "vault" ->>
    [http.services.vault]
      [http.services.vault.loadBalancer]
      << range . ->>
        [[http.services.vault.loadBalancer.servers]]
          url = "http://<< .Address >>:<< .Port >>"
      << end >>
    <<- end >>
EOH
        destination = "/config/conf/route-hashi.toml"
        change_mode = "noop"
      }

      resources {
        cpu        = 100
        memory     = 100
        memory_max = 200
      }
    }
  }
}
29
nomad/traefik/traefik.tf
Normal file
@ -0,0 +1,29 @@
variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
  default     = "dev.homelab"
}

variable "consul_address" {
  type        = string
  description = "Address of the Consul server used for dynamic routes"
}

data "consul_nodes" "all-nodes" {
  query_options {
    datacenter = "dc1"
  }
}

resource "nomad_job" "traefik" {
  hcl2 {
    enabled = true
    vars = {
      # "consul_address" = "${var.consul_address}",
      "consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
      "base_hostname"  = "${var.base_hostname}",
    }
  }

  jobspec = file("${path.module}/traefik.nomad")
}
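
A hedged sketch of how this could be wired up from a root module. The module path is an assumption about the repo layout, and consul_address still has to be supplied even though the job variable is currently derived from the consul_nodes data source; the value shown mirrors the default used elsewhere in the repo:

    module "traefik" {
      source         = "./traefik"               # assumed child-module path
      base_hostname  = "dev.homelab"
      consul_address = "http://n1.thefij:8500"   # assumed value
    }
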
@ -1,27 +0,0 @@
---
- name: Unseal Vault
  hosts: vault_instances

  tasks:
    - name: Get Vault status
      uri:
        url: http://127.0.0.1:8200/v1/sys/health
        method: GET
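        # /sys/health status codes: 200 active, 429 standby, 472 DR secondary,
        # 473 performance standby, 501 not initialized, 503 sealed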
        status_code: 200, 429, 472, 473, 501, 503
        body_format: json
        return_content: true
      register: vault_status

    - name: Unseal Vault
      no_log: true
      command:
        argv:
          - "vault"
          - "operator"
          - "unseal"
          - "-address=http://127.0.0.1:8200/"
          - "{{ item }}"
      loop: "{{ unseal_keys_hex }}"
      when:
        - unseal_keys_hex is defined
        - vault_status.json["sealed"]
@ -3,16 +3,6 @@ variable "consul_address" {
  default = "http://n1.thefij:8500"
}

variable "vault_address" {
  type    = string
  default = ""
}

variable "nomad_address" {
  type    = string
  default = ""
}

variable "base_hostname" {
  type        = string
  description = "Base hostname to serve content from"
5
nomad/vault-kv.tf
Normal file
@ -0,0 +1,5 @@
resource "vault_mount" "kv" {
  path        = "kv"
  type        = "kv-v2"
  description = "Catch all kv mount"
}
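
Because the mount is kv-v2, reads go through the kv/data/ prefix. A hedged sketch of how a job could consume a value from this mount via a Nomad template once the Vault integration is enabled; the secret path and key are hypothetical:

    template {
      data        = <<EOF
    # seed the secret first, e.g.: vault kv put kv/example password=...
    {{ with secret "kv/data/example" -}}
    EXAMPLE_PASSWORD={{ .Data.data.password }}
    {{- end }}
    EOF
      destination = "secrets/example.env"
      env         = true
    }
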
@ -50,7 +50,6 @@ job "whoami" {
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.whoami.entryPoints=websecure",
        "traefik.http.routers.whoami.middlewares=basic-auth@file",
      ]
    }

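The basic-auth@file middleware referenced here lives in Traefik's file provider (the /etc/traefik/conf directory watched by the traefik job) and is not part of this diff. A plausible sketch of that dynamic config, with a placeholder htpasswd-style hash:

    [http.middlewares]
      [http.middlewares.basic-auth.basicAuth]
        # entries are htpasswd-style; generate with: htpasswd -nB <user>
        users = [
          "admin:$2y$05$placeholderhash",
        ]
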