Compare commits


1 Commit

Author SHA1 Message Date
IamTheFij dbaf73e3d6 WIP: Add authelia job 2022-06-23 09:53:26 -07:00
110 changed files with 2447 additions and 8400 deletions

View File

@@ -15,8 +15,8 @@ repos:
- id: check-merge-conflict
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/Yelp/detect-secrets
rev: v1.2.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets-baseline']
# - repo: https://github.com/Yelp/detect-secrets
# rev: v1.0.3
# hooks:
# - id: detect-secrets
# args: ['--baseline', '.secrets-baseline']

View File

@@ -1,214 +0,0 @@
{
"version": "1.2.0",
"plugins_used": [
{
"name": "ArtifactoryDetector"
},
{
"name": "AWSKeyDetector"
},
{
"name": "AzureStorageKeyDetector"
},
{
"name": "Base64HighEntropyString",
"limit": 4.5
},
{
"name": "BasicAuthDetector"
},
{
"name": "CloudantDetector"
},
{
"name": "GitHubTokenDetector"
},
{
"name": "HexHighEntropyString",
"limit": 3.0
},
{
"name": "IbmCloudIamDetector"
},
{
"name": "IbmCosHmacDetector"
},
{
"name": "JwtTokenDetector"
},
{
"name": "KeywordDetector",
"keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
{
"name": "NpmDetector"
},
{
"name": "PrivateKeyDetector"
},
{
"name": "SendGridDetector"
},
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
{
"name": "SquareOAuthDetector"
},
{
"name": "StripeDetector"
},
{
"name": "TwilioKeyDetector"
}
],
"filters_used": [
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_baseline_file",
"filename": ".secrets-baseline"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
},
{
"path": "detect_secrets.filters.heuristic.is_indirect_reference"
},
{
"path": "detect_secrets.filters.heuristic.is_likely_id_string"
},
{
"path": "detect_secrets.filters.heuristic.is_lock_file"
},
{
"path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
},
{
"path": "detect_secrets.filters.heuristic.is_potential_uuid"
},
{
"path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
},
{
"path": "detect_secrets.filters.heuristic.is_sequential_string"
},
{
"path": "detect_secrets.filters.heuristic.is_swagger_file"
},
{
"path": "detect_secrets.filters.heuristic.is_templated_secret"
},
{
"path": "detect_secrets.filters.regex.should_exclude_secret",
"pattern": [
"(\\${.*}|from_env|fake|!secret)"
]
}
],
"results": {
"nomad/core/metrics/grafana/grafana.ini": [
{
"type": "Basic Auth Credentials",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "e5e9fa1ba31ecd1ae84f75caaa474f3a663f05f4",
"is_verified": false,
"line_number": 78,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "55ebda65c08313526e7ba08ad733e5ebea9900bd",
"is_verified": false,
"line_number": 109,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "d033e22ae348aeb5660fc2140aec35850c4da997",
"is_verified": false,
"line_number": 151,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "10bea62ff1e1a7540dc7a6bc10f5fa992349023f",
"is_verified": false,
"line_number": 154,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "5718bce97710e6be87ea160b36eaefb5032857d3",
"is_verified": false,
"line_number": 239,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/core/metrics/grafana/grafana.ini",
"hashed_secret": "10aed9d7ebef778a9b3033dba3f7813b639e0d50",
"is_verified": false,
"line_number": 252,
"is_secret": false
}
],
"nomad/core/syslogng.nomad": [
{
"type": "Base64 High Entropy String",
"filename": "nomad/core/syslogng.nomad",
"hashed_secret": "298b5925fe7c7458cb8a12a74621fdedafea5ad6",
"is_verified": false,
"line_number": 159,
"is_secret": false
},
{
"type": "Base64 High Entropy String",
"filename": "nomad/core/syslogng.nomad",
"hashed_secret": "3a1cec2d3c3de7e4da4d99c6731ca696c24b72b4",
"is_verified": false,
"line_number": 159,
"is_secret": false
}
],
"nomad/vault_hashi_vault_values.example.yml": [
{
"type": "Secret Keyword",
"filename": "nomad/vault_hashi_vault_values.example.yml",
"hashed_secret": "f2baa52d02ca888455ce47823f47bf372d5eecb3",
"is_verified": false,
"line_number": 8,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/vault_hashi_vault_values.example.yml",
"hashed_secret": "18960546905b75c869e7de63961dc185f9a0a7c9",
"is_verified": false,
"line_number": 10,
"is_secret": false
},
{
"type": "Secret Keyword",
"filename": "nomad/vault_hashi_vault_values.example.yml",
"hashed_secret": "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
"is_verified": false,
"line_number": 22,
"is_secret": false
}
]
},
"generated_at": "2022-10-27T21:28:03Z"
}

View File

@@ -1,35 +0,0 @@
.PHONY: default
default: check

# Ensures virtualenv is present
virtualenv_run:
	virtualenv --python python3 virtualenv_run
	./virtualenv_run/bin/pip install -r requirements.txt

# Alias for virtualenv_run
.PHONY: virtualenv
virtualenv: virtualenv_run

# Installs pre-commit hooks
.PHONY: install-hooks
install-hooks: virtualenv_run
	./virtualenv_run/bin/pre-commit install --install-hooks

# Checks files for encryption
.PHONY: check
check: virtualenv_run
	./virtualenv_run/bin/pre-commit run --all-files

# Creates a new secrets baseline
.secrets-baseline: virtualenv_run
	./virtualenv_run/bin/detect-secrets scan --exclude-secrets '(\$${.*}|from_env|fake|!secret)' > .secrets-baseline

# Audits secrets against baseline
.PHONY: secrets-audit
secrets-audit: virtualenv_run .secrets-baseline
	./virtualenv_run/bin/detect-secrets audit .secrets-baseline

# Updates secrets baseline
.PHONY: secrets-update
secrets-update: virtualenv_run .secrets-baseline
	./virtualenv_run/bin/detect-secrets scan --baseline .secrets-baseline

3 nomad/.gitignore vendored
View File

@@ -3,6 +3,3 @@ venv/
vault-keys.json
nomad_bootstrap.json
ca/
collections/ansible_collections/
consul_values.yml
vault_hashi_vault_values.yml

View File

@@ -4,7 +4,6 @@
provider "registry.terraform.io/hashicorp/consul" {
version = "2.14.0"
hashes = [
"h1:lJWOdlqevg6FQLFlfM3tGOsy9yPrjm9/vqkfzVrqT/A=",
"h1:xRwktNwLL3Vo43F7v73tfcgbcnjCE2KgCzcNrsQJ1cc=",
"zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee",
"zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b",
@@ -20,29 +19,9 @@ provider "registry.terraform.io/hashicorp/consul" {
]
}
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
@@ -62,7 +41,6 @@ provider "registry.terraform.io/hashicorp/vault" {
version = "3.3.1"
hashes = [
"h1:SOTmxGynxFf1hECFq0/FGujGQZNktePze/4mfdR/iiU=",
"h1:i7EC2IF0KParI+JPA5ZtXJrAn3bAntW5gEMLvOXwpW4=",
"zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113",
"zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0",
"zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1",

View File

@@ -55,35 +55,16 @@ cluster: ansible-cluster
venv/bin/ansible:
	python3 -m venv venv
	./venv/bin/pip install ansible python-consul hvac
.PHONY: galaxy
galaxy: venv/bin/ansible
	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
	./venv/bin/ansible-galaxy collection install -r collections/requirements.yml
	./venv/bin/pip install ansible
	./venv/bin/pip install python-consul
.PHONY: ansible-cluster
ansible-cluster: venv/bin/ansible galaxy
ansible-cluster: venv/bin/ansible
	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
		$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
		-i ansible_hosts.yml -M ./roles ./setup-cluster.yml
.PHONY: bootstrap-values
bootstrap-values: venv/bin/ansible galaxy
	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -vv \
		$(shell test -f vault-keys.json && echo '-e "@vault-keys.json"') \
		-i ansible_hosts.yml -M ./roles ./bootstrap-values.yml
.PHONY: unseal-vault
unseal-vault: venv/bin/ansible galaxy
	env VIRTUAL_ENV=/Users/ifij/workspace/iamthefij/orchestration-tests/nomad/venv ./venv/bin/ansible-playbook -K -vv \
		-e "@vault-keys.json" -i ansible_hosts.yml -M ./roles ./unseal-vault.yml
.PHONY: init
init:
	@terraform init
.PHONY: plan
plan:
	@terraform plan \

View File

@@ -1,59 +1,38 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
version = "1.4.16"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "3.7.0"
version = "3.4.1"
hashes = [
"h1:idawLPCbZgHIb+NRLJs4YdIcQgACqYiT5VwQfChkn+w=",
"zh:256b82692c560c76ad51414a2c003cadfa10338a9df333dbe22dd14a9ed16f95",
"zh:329ed8135a98bd6a000d014e40bc5981c6868cf50eedf454f1a1f72ac463bdf0",
"zh:3b32c18b492a6ac8e1ccac40d28cd42a88892ef8f3515291676136e3faac351c",
"zh:4c5ea8e80543b36b1999257a41c8b9cde852542251de82a94cff2f9d280ac2ec",
"zh:5d968ed305cde7aa3567a943cb2f5f8def54b40a2292b66027b1405a1cf28585",
"zh:60226d1a0a496a9a6c1d646800dd7e1bd1c4f5527e7307ff0bca9f4d0b5395e2",
"zh:71b11def501c994ee5305f24bd47ebfcca2314c5acca3efcdd209373d0068ac0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:89be6b5db3be473bfd14422a9abf83245c4b22ce47a8fe463bbebf8e20958ab1",
"zh:8f91051d43ae309bb8f3f6a9659f0fd26b1b239faf671c139b4e9ad0d208db05",
"zh:b5114983273d3170878f657b92738b2c40953aedeef2e1840588ecaf1bc0827e",
"zh:fd56db01c5444dc8ca2e0ad2f13fc4c17735d0fdeb5960e23176fb3f5a5114d3",
"h1:oow6cAwKiFpJBBWKsDqNmwZIrFTWWvoeIbqs+vyUDE0=",
"zh:1eb8370a1846e34e2bcc4d11eece5733735784a8eab447bbed3cfd822101b577",
"zh:2df3989327cea68b2167514b7ebddc67b09340f00bbf3fa85df03c97adfb9d25",
"zh:3dd1e317264f574985e856296deef71a76464918bf0566eb0d7f6389ea0586bd",
"zh:9750861f2822482aa608ea5a52b385bc42b2e1f2511094e6a975412618c4495d",
"zh:9b940e7f78975d29a4d0a116cf43c0bc1cb03bec4ad8d34887d64e6e60bacb9e",
"zh:9cb6e7ad2a62529d35dacd20695d49c2f02230cb785d46178cc10f4ec80e5a51",
"zh:a12718689bbcb37bcbb9132c18bffd354fad8ab5c8cb89cec1a0ee85c65b8cb7",
"zh:a6e38afacca1af4fab04a9f2dc49b8295eb462db68bdc7451352d0f950f804f8",
"zh:d6e0e994d51b9e07d5713d4796381f9e129e9de962e79caae2b7055f6f68297e",
"zh:ea4bbef7a1bb2553db473fa304c93845674167b61e8c9677107a96c8c696da12",
"zh:f985a8b7f4ef7d1eba9cef7d99997ee9c4a54ffe76dab7fa8b1fdec2a9edca7e",
]
}

6 nomad/acls/acls.tf Normal file
View File

@@ -0,0 +1,6 @@
resource "nomad_acl_policy" "create_post_bootstrap_policy" {
# count = can(tobool(var.nomad_secret_id)) ? 1 : 0
name = "anonymous"
description = "Anon RW"
rules_hcl = file("${path.module}/nomad-anon-bootstrap.hcl")
}
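The rules file referenced above is not included in this diff. As a minimal sketch, an "Anon RW" bootstrap policy would mirror the read-only anonymous policy deleted just below, but with write access; the exact scopes here are assumptions:

# Hypothetical nomad-anon-bootstrap.hcl (not part of this diff)
namespace "*" {
  policy = "write"
}
agent {
  policy = "write"
}
node {
  policy = "write"
}
host_volume "*" {
  policy = "write"
}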

View File

@@ -1,23 +0,0 @@
namespace "*" {
policy = "read"
}
agent {
policy = "read"
}
operator {
policy = "read"
}
quota {
policy = "read"
}
node {
policy = "read"
}
host_volume "*" {
policy = "read"
}

View File

@@ -1,4 +0,0 @@
namespace "*" {
policy = "read"
capabilities = ["submit-job", "dispatch-job", "read-logs"]
}

View File

@@ -1,18 +0,0 @@
resource "nomad_acl_policy" "anon_policy" {
name = "anonymous"
description = "Anon RO"
rules_hcl = file("${path.module}/nomad-anon-policy.hcl")
}
resource "nomad_acl_policy" "admin" {
name = "admin"
description = "Admin RW for admins"
rules_hcl = file("${path.module}/nomad-admin-policy.hcl")
}
# TODO: (security) Limit this scope
resource "nomad_acl_policy" "deploy" {
name = "deploy"
description = "Write for job deployments"
rules_hcl = file("${path.module}/nomad-deploy-policy.hcl")
}

View File

@@ -8,41 +8,90 @@ resource "vault_nomad_secret_backend" "config" {
backend = "nomad"
description = "Nomad ACL"
token = nomad_acl_token.vault.secret_id
default_lease_ttl_seconds = "3600"
max_lease_ttl_seconds = "7200"
ttl = "3600"
max_ttl = "7200"
}
# Vault roles generating Nomad tokens
resource "vault_nomad_secret_role" "nomad-deploy" {
backend = vault_nomad_secret_backend.config.backend
role = "nomad-deploy"
# Nomad policies
policies = ["deploy"]
backend = vault_nomad_secret_backend.config.backend
role = "nomad-deploy"
policies = ["nomad-deploy"]
}
resource "vault_nomad_secret_role" "admin-management" {
resource "vault_nomad_secret_role" "admin" {
backend = vault_nomad_secret_backend.config.backend
role = "admin-management"
type = "management"
}
resource "vault_nomad_secret_role" "admin" {
backend = vault_nomad_secret_backend.config.backend
role = "admin"
# Nomad policies
policies = ["admin"]
resource "vault_policy" "nomad-deploy" {
name = "nomad-deploy"
policy = <<EOH
path "nomad/creds/nomad-deploy" {
capabilities = ["read"]
}
EOH
}
# Nomad Vault token access
resource "vault_token_auth_backend_role" "nomad-cluster" {
role_name = "nomad-cluster"
token_explicit_max_ttl = 0
allowed_policies = ["access-tables", "nomad-task"]
allowed_policies = ["access-tables"]
orphan = true
token_period = 259200
renewable = true
}
# Policy for nomad tokens
resource "vault_policy" "nomad-token" {
name = "nomad-server"
policy = <<EOH
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up the token passed to Nomad to validate the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
EOH
}
# Create a vault token for Nomad
# resource "vault_token" "nomad-token" {
# policies = ["nomad-server"]
# period = "72h"
# no_parent = true
# }
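For the other half of this integration, the Nomad server agent would mint task tokens through the "nomad-cluster" token role defined above. A hedged sketch of that agent vault stanza, which is not part of this diff; the Vault address is an assumption:

# Hypothetical Nomad server agent config (not in this diff)
vault {
  enabled          = true
  address          = "http://vault.service.consul:8200" # assumed address
  create_from_role = "nomad-cluster"                    # token role defined above
}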

View File

@@ -10,7 +10,7 @@
#
# mysql {
# # How to give access here?
# connection_url = "{{username}}:{{password}}@tcp(mysql-server.service.consul:3306)"
# connection_url = "{{username}}:{{password}}@tcp(localhost:3306)"
# username = ""
# password = ""
# }

View File

@@ -1,38 +0,0 @@
# Configure Consul provider
provider "consul" {
address = var.consul_address
}
# Get Nomad client from Consul
data "consul_service" "nomad" {
name = "nomad-client"
}
# Get Vault client from Consul
data "consul_service" "vault" {
name = "vault"
tag = "active"
}
locals {
# Get Nomad address from Consul
nomad_node = data.consul_service.nomad.service[0]
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
# Get Vault address from Consul
vault_node = data.consul_service.vault.service[0]
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
# Configure the Nomad provider
provider "nomad" {
address = local.nomad_node_address
secret_id = var.nomad_secret_id
region = "global"
}
# Configure the Vault provider
provider "vault" {
address = local.vault_node_address
token = var.vault_token
}

View File

@@ -1,17 +0,0 @@
variable "consul_address" {
type = string
default = "http://n1.thefij:8500"
}
variable "nomad_secret_id" {
type = string
description = "Secret ID for ACL bootstrapped Nomad"
sensitive = true
default = ""
}
variable "vault_token" {
type = string
sensitive = true
default = ""
}

View File

@@ -1,8 +0,0 @@
resource "vault_auth_backend" "userpass" {
type = "userpass"
tune {
max_lease_ttl = "1h"
listing_visibility = "unauth"
}
}

View File

@@ -1,83 +0,0 @@
resource "vault_policy" "admin" {
name = "admin"
policy = <<EOF
path "*" {
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
}
EOF
}
resource "vault_policy" "nomad-deploy" {
name = "nomad-deploy"
policy = <<EOH
path "nomad/creds/nomad-deploy" {
capabilities = ["read"]
}
EOH
}
# Policy for clusters
resource "vault_policy" "nomad-task" {
name = "nomad-task"
policy = <<EOH
path "kv/data/*" {
# Does this need create, update, delete?
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}
# Policy for nomad tokens
resource "vault_policy" "nomad-server" {
name = "nomad-server"
policy = <<EOH
# Allow creating tokens under "nomad-cluster" token role. The token role name
# should be updated if "nomad-cluster" is not used.
path "auth/token/create/nomad-cluster" {
capabilities = ["update"]
}
# Allow looking up "nomad-cluster" token role. The token role name should be
# updated if "nomad-cluster" is not used.
path "auth/token/roles/nomad-cluster" {
capabilities = ["read"]
}
# Allow looking up the token passed to Nomad to validate the token has the
# proper capabilities. This is provided by the "default" policy.
path "auth/token/lookup-self" {
capabilities = ["read"]
}
# Allow looking up incoming tokens to validate they have permissions to access
# the tokens they are requesting. This is only required if
# `allow_unauthenticated` is set to false.
path "auth/token/lookup" {
capabilities = ["update"]
}
# Allow revoking tokens that should no longer exist. This allows revoking
# tokens for dead tasks.
path "auth/token/revoke-accessor" {
capabilities = ["update"]
}
# Allow checking the capabilities of our own token. This is used to validate the
# token upon startup.
path "sys/capabilities-self" {
capabilities = ["update"]
}
# Allow our own token to be renewed.
path "auth/token/renew-self" {
capabilities = ["update"]
}
# This section grants all access on "secret/*". Further restrictions can be
# applied to this broad policy, as shown below.
path "kv/data/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
EOH
}

View File

@@ -13,12 +13,6 @@ all:
group: "bin"
mode: "0755"
read_only: false
- name: lldap-data
path: /srv/volumes/lldap
owner: "root"
group: "bin"
mode: "0755"
read_only: false
n2.thefij:
nomad_node_class: ingress
nomad_node_role: both
@@ -35,25 +29,15 @@ all:
group: "bin"
mode: "0755"
read_only: false
- name: sonarr-data
path: /srv/volumes/sonarr
- name: authentik-data
path: /srv/volumes/gitea
owner: "root"
group: "bin"
mode: "0755"
read_only: false
- name: nzbget-data
path: /srv/volumes/nzbget
owner: "root"
group: "bin"
mode: "0755"
read_only: false
# n3.thefij:
# nomad_node_class: ingress
# nomad_node_role: both
# pi3:
# nomad_node_role: client
# pi4:
# nomad_node_role: client
n3.thefij:
nomad_node_class: ingress
nomad_node_role: both
consul_instances:
children:

View File

@@ -4,7 +4,6 @@
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",

118 nomad/backups/backup.nomad Normal file
View File

@@ -0,0 +1,118 @@
variable "nextcloud_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup" {
datacenters = ["dc1"]
type = "system"
constraint {
attribute = "${node.unique.name}"
# Only node with a backup job so far
value = "n2"
}
group "backup" {
network {
mode = "bridge"
port "metrics" {
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = true
source = "all-volumes"
}
service {
port = "metrics"
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = true
}
config {
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
# TODO: Add user with access to all databases or variables for each user
"MYSQL_DATABASE" = "nextcloud"
"MYSQL_USER" = "nextcloud"
"MYSQL_PASSWORD" = "nextcloud"
# TODO: Something from vault
"BACKUP_PASSPHRASE" = "secretpass"
}
template {
data = <<EOF
# Current node is {{ env "node.unique.name" }}
{{ range service "nextcloud" }}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") }}
${var.nextcloud_backup}
{{ end }}{{ end }}
EOF
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 256
}
}
}
}

25 nomad/backups/backups.tf Normal file
View File

@@ -0,0 +1,25 @@
locals {
nextcloud_backup = file("${path.module}/jobs/nextcloud.hcl")
}
resource "nomad_job" "backups" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
}
}
jobspec = file("${path.module}/backup.nomad")
}
resource "nomad_job" "backups-oneoff" {
hcl2 {
enabled = true
vars = {
"nextcloud_backup" = "${local.nextcloud_backup}",
}
}
jobspec = file("${path.module}/oneoff.nomad")
}
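Both resources above inject the same jobs/nextcloud.hcl contents through Nomad HCL2 variables, so a second application's backup would be wired the same way. A minimal sketch, assuming a hypothetical jobs/gitea.hcl and a matching variable block in the jobspecs:

locals {
  # Hypothetical second backup definition; this file is not part of this diff
  gitea_backup = file("${path.module}/jobs/gitea.hcl")
}

# Each extra var would also need a matching `variable` block in
# backup.nomad, like the existing `variable "nextcloud_backup"`:
#   vars = {
#     "nextcloud_backup" = local.nextcloud_backup,
#     "gitea_backup"     = local.gitea_backup,
#   }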

View File

@@ -1,11 +1,23 @@
job "Nextcloud" {
schedule = "0 * * * *"
schedule = "* * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/nextcloud"
# TODO: Backup to a meaningful location, this is just for testing
repo = "/local/repo"
# Read from secret file
passphrase = env("BACKUP_PASSPHRASE")
}
# Remove when using a proper backup destination
task "Create dir for repo" {
pre_script {
on_backup = "echo 'Backing up something'"
}
pre_script {
on_backup = "mkdir -p /local/repo"
}
}
mysql "Backup database" {
hostname = env("MYSQL_HOST")
port = env("MYSQL_PORT")

131 nomad/backups/oneoff.nomad Normal file
View File

@@ -0,0 +1,131 @@
variable "nextcloud_backup" {
type = string
description = "HCL config for Restic Scheduler jobs"
}
job "backup-oneoff" {
datacenters = ["dc1"]
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
group "nextcloud" {
count = 1
network {
mode = "bridge"
}
volume "nextcloud-data" {
type = "host"
read_only = true
source = "nextcloud-data"
}
volume "gitea-data" {
type = "host"
read_only = true
source = "gitea-data"
}
volume "authentik-data" {
type = "host"
read_only = true
source = "authentik-data"
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
}
task "backup" {
driver = "docker"
volume_mount {
volume = "nextcloud-data"
destination = "/data/nextcloud"
read_only = false
}
volume_mount {
volume = "gitea-data"
destination = "/data/gitea"
read_only = false
}
volume_mount {
volume = "authentik-data"
destination = "/data/authentik"
read_only = false
}
config {
image = "iamthefij/resticscheduler"
ports = ["backup"]
args = [
"-once",
"-${NOMAD_META_task}",
"${NOMAD_META_job_name}",
"/jobs/nextcloud.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
# TODO: Add user with access to all databases or variables for each user
"MYSQL_DATABASE" = "nextcloud"
"MYSQL_USER" = "nextcloud"
"MYSQL_PASSWORD" = "nextcloud"
# TODO: Something from vault
"BACKUP_PASSPHRASE" = "secretpass"
}
template {
data = var.nextcloud_backup
destination = "jobs/nextcloud.hcl"
}
resources {
cpu = 50
memory = 256
}
}
}
}

View File

@@ -4,7 +4,6 @@
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",

View File

@@ -10,7 +10,6 @@ job "blocky" {
update {
max_parallel = 1
auto_revert = true
}
group "blocky" {
@@ -23,7 +22,6 @@ job "blocky" {
}
port "api" {
host_network = "loopback"
to = "4000"
}
}
@@ -41,10 +39,6 @@ job "blocky" {
metrics_addr = "${NOMAD_ADDR_api}"
}
tags = [
"traefik.enable=true",
]
connect {
sidecar_service {
proxy {
@@ -69,8 +63,7 @@ job "blocky" {
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
memory = 50
}
}
}
@@ -85,7 +78,7 @@ job "blocky" {
}
}
task "blocky" {
task "main" {
driver = "docker"
config {
@@ -101,14 +94,12 @@ job "blocky" {
resources {
cpu = 50
memory = 50
memory_max = 100
memory = 100
}
template {
data = var.config_data
destination = "app/config.yml"
splay = "1m"
}
}
}

View File

@@ -2,29 +2,6 @@ upstream:
default:
- 1.1.1.1
- 1.0.0.1
quad9:
- 9.9.9.9
- 149.112.112.112
- 2620:fe::fe
- 2620:fe::9
- https://dns.quad9.net/dns-query
- tcp-tls:dns.quad9.net
quad9-unsecured:
- 9.9.9.10
- 149.112.112.10
- 2620:fe::10
- 2620:fe::fe:10
- https://dns10.quad9.net/dns-query
- tcp-tls:dns10.quad9.net
conditional:
mapping:
home.arpa: 192.168.2.1
in-addr.arpa: 192.168.2.1
iot: 192.168.2.1
local: 192.168.2.1
thefij: 192.168.2.1
.: 192.168.2.1
blocking:
blackLists:
@@ -39,10 +16,6 @@ blocking:
- https://perflyst.github.io/PiHoleBlocklist/regex.list
malware:
- https://mirror1.malwaredomains.com/files/justdomains
whiteLists:
# Move to Gitea when deployed internally
ads:
{{ keyOrDefault "blocky/whitelists/ads" "# None" | indent 6 }}
clientGroupsBlock:
default:
- ads
@@ -60,6 +33,14 @@ customDNS:
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
{{ with service "whoami" -}}
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
whoami: {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
# Other mappings
{{ keyOrDefault "blocky/mappings" "# None" | indent 4 }}

View File

@@ -1,80 +0,0 @@
---
- name: Bootstrap Consul values
hosts: consul_instances
gather_facts: false
vars_files:
- consul_values.yml
tasks:
- name: Add values
delegate_to: localhost
run_once: true
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Write values
consul_kv:
host: "{{ inventory_hostname }}"
key: "{{ item.key }}"
value: "{{ item.value }}"
loop: "{{ consul_values | default({}) | dict2items }}"
- name: Bootstrap vault values
hosts: vault_instances
gather_facts: false
vars_files:
- ./vault_hashi_vault_values.yml
tasks:
- name: Bootstrap Vault secrets
delegate_to: localhost
run_once: true
block:
- name: Install hvac
pip:
name: hvac
extra_args: --index-url https://pypi.org/simple
- name: Check mount
community.hashi_vault.vault_read:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
ignore_errors: true
register: check_mount
- name: Create kv mount
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "/sys/mounts/kv"
data:
type: kv-v2
when: check_mount is not succeeded
- name: Write values
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "kv/data/{{ item.key }}"
data:
data:
"{{ item.value }}"
loop: "{{ hashi_vault_values | default({}) | dict2items }}"
retries: 2
delay: 10
- name: Write userpass
no_log: true
community.hashi_vault.vault_write:
url: "http://{{ inventory_hostname }}:8200"
token: "{{ root_token }}"
path: "auth/userpass/users/{{ item.name }}"
data: '{"password": "{{ item.password }}", "policies": "{{ item.policies }}"}'
loop: "{{ vault_userpass }}"

View File

@@ -1,64 +0,0 @@
---
- name: Delete Consul data
hosts: consul_instances
tasks:
- name: Stop consul
systemd:
name: consul
state: stopped
become: true
- name: Stop vault
systemd:
name: vault
state: stopped
become: true
- name: Remove data dir
file:
path: /opt/consul
state: absent
become: true
- name: Delete Nomad data
hosts: nomad_instances
tasks:
- name: Stop nomad
systemd:
name: nomad
state: stopped
become: true
- name: Kill nomad
shell:
cmd: systemctl kill nomad
become: true
- name: Stop all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker stop
become: true
- name: Remove all containers
shell:
cmd: docker ps -a | awk '/^[0-9abcdef]/{print $1}' | xargs -r docker rm
become: true
- name: Unmount secrets
shell:
cmd: mount | awk '/nomad/ {print $3}' | xargs -n1 -r umount
become: true
- name: Remove data dir
file:
path: /var/nomad
state: absent
become: true
- name: Remove data dir
file:
path: /opt/nomad/data
state: absent
become: true

View File

@@ -1,4 +0,0 @@
---
collections:
- name: community.hashi_vault
version: 3.0.0

View File

@@ -1,4 +0,0 @@
consul_values:
"blocky/whitelists/ads": |
- |
somedomain.com

View File

@@ -1,12 +0,0 @@
module "databases" {
source = "./databases"
}
module "core" {
source = "./core"
base_hostname = var.base_hostname
# Metrics and Blocky depend on databases
depends_on = [module.databases]
}
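Both modules reference var.base_hostname, whose declaration is not part of this diff. A minimal sketch of that root variable; the default value is an assumption inferred from the thefij.rocks hostnames used elsewhere in this diff:

# Hypothetical root-module variable (not shown in this diff)
variable "base_hostname" {
  type    = string
  default = "thefij.rocks" # assumed from nomad.thefij.rocks / grafana.thefij.rocks
}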

View File

@@ -1,59 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.16.2"
hashes = [
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
]
}
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
]
}

View File

@@ -1,55 +0,0 @@
job "ddclient" {
datacenters = ["dc1"]
type = "service"
group "ddclient" {
task "ddclient" {
driver = "docker"
config {
image = "linuxserver/ddclient:3.9.1"
mount {
type = "bind"
source = "secrets/ddclient.conf"
target = "/config/ddclient.conf"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOH
daemon=900
ssl=yes
use=web
protocol=cloudflare,
zone={{ key "ddclient/zone" }},
ttl=1,
{{ with secret "kv/data/cloudflare" -}}
login={{ .Data.data.api_user }},
password={{ .Data.data.api_key }}
# login=token,
# password={{ .Data.data.api_token_dns_edit_all }}
{{ end -}}
{{ key "ddclient/domain" }}
EOH
destination = "secrets/ddclient.conf"
change_mode = "restart"
}
resources {
cpu = 50
memory = 50
memory_max = 100
}
}
}
}

View File

@@ -1,140 +0,0 @@
job "lldap" {
datacenters = ["dc1"]
type = "service"
group "lldap" {
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 17170
}
port "ldap" {
host_network = "loopback"
to = 3890
}
}
volume "lldap-data" {
type = "host"
read_only = false
source = "lldap-data"
}
service {
name = "lldap"
port = "ldap"
connect {
sidecar_service {
proxy {
local_service_port = 3890
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
}
service {
name = "ldap-admin"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 17170
}
}
sidecar_task {
resources {
cpu = 20
memory = 20
}
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.ldap-admin.entryPoints=websecure",
]
}
task "lldap" {
driver = "docker"
volume_mount {
volume = "lldap-data"
destination = "/data"
read_only = false
}
config {
image = "nitnelave/lldap"
ports = ["ldap", "web"]
args = ["run", "--config-file", "/lldap_config.toml"]
mount {
type = "bind"
source = "secrets/lldap_config.toml"
target = "/lldap_config.toml"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOH
database_url = "sqlite:///data/users.db?mode=rwc"
key_file = "/data/private_key"
ldap_base_dn = "{{ keyOrDefault "global/ldap/base_dn" "dc=example,dc=com" }}"
{{ with secret "kv/data/lldap" -}}
jwt_secret = "{{ .Data.data.jwt_secret }}"
ldap_user_dn = "{{ .Data.data.admin_user }}"
ldap_user_email = "{{ .Data.data.admin_email }}"
ldap_user_pass = "{{ .Data.data.admin_password }}"
{{ end -}}
{{ with secret "kv/data/smtp" -}}
[smtp_options]
enable_password_reset = true
server = "{{ .Data.data.server }}"
port = {{ .Data.data.port }}
tls_required = {{ .Data.data.tls }}
user = "{{ .Data.data.user }}"
password = "{{ .Data.data.password }}"
{{ with secret "kv/data/lldap" -}}
from = "{{ .Data.data.smtp_from }}"
reply_to = "{{ .Data.data.smtp_reply_to }}"
{{ end -}}
{{ end -}}
EOH
destination = "secrets/lldap_config.toml"
change_mode = "restart"
}
resources {
cpu = 10
memory = 20
memory_max = 100
}
}
}
}

View File

@@ -1,45 +0,0 @@
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
max_transfer_retries: 0
schema_config:
configs:
- from: 2018-04-15
store: boltdb
object_store: filesystem
schema: v11
index:
prefix: index_
period: 168h
storage_config:
boltdb:
directory: /loki/index
filesystem:
directory: /loki/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s

View File

@@ -1,134 +0,0 @@
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
# Not in this module
# depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
base_hostname = var.base_hostname
}
module "nomad_login" {
source = "../levant"
template_path = "service.nomad"
variables = {
name = "nomad-login"
image = "iamthefij/nomad-vault-login"
service_port = 5000
ingress = true
ingress_rule = "Host(`nomad.thefij.rocks`) && PathPrefix(`/login`)"
env = jsonencode({
VAULT_ADDR = "http://$${attr.unique.network.ip-address}:8200",
})
}
}
module "metrics" {
source = "./metrics"
# Not in this module
# depends_on = [module.databases]
}
module "loki" {
source = "../levant"
template_path = "service.nomad"
variables = {
name = "loki"
image = "grafana/loki:2.2.1"
service_port = 3100
ingress = true
sticky_disk = true
healthcheck = "/ready"
templates = jsonencode([
{
data = file("${path.module}/loki-config.yml")
dest = "/etc/loki/local-config.yaml"
}
])
}
}
resource "consul_config_entry" "loki_intent" {
name = "loki"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "promtail"
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "syslogng-promtail"
Precedence = 9
Type = "consul"
},
]
})
}
resource "nomad_job" "syslog-ng" {
jobspec = file("${path.module}/syslogng.nomad")
}
resource "nomad_job" "ddclient" {
jobspec = file("${path.module}/ddclient.nomad")
}
resource "nomad_job" "lldap" {
jobspec = file("${path.module}/lldap.nomad")
}
resource "consul_config_entry" "syslogng_promtail_intent" {
name = "syslogng-promtail"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "syslogng"
Precedence = 9
Type = "consul"
},
]
})
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}

View File

@@ -1,150 +0,0 @@
job "metrics" {
datacenters = ["dc1"]
type = "system"
group "promtail" {
network {
mode = "bridge"
port "promtail" {
to = 9080
}
}
service {
name = "promtail"
port = "promtail"
meta {
metrics_addr = "${NOMAD_ADDR_promtail}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 9080
upstreams {
destination_name = "loki"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
}
}
}
check {
type = "http"
path = "/metrics"
port = "promtail"
interval = "10s"
timeout = "10s"
}
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:2.2.1"
args = ["-config.file=/etc/promtail/promtail.yml"]
ports = ["promtail"]
# Mount config
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
# Bind mount host machine-id and log directories
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
mount {
type = "bind"
source = "/var/log/journal/"
target = "/var/log/journal/"
readonly = true
}
mount {
type = "bind"
source = "/run/log/journal/"
target = "/run/log/journal/"
readonly = true
}
# mount {
# type = "bind"
# source = "/var/log/audit"
# target = "/var/log/audit"
# readonly = true
# }
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
# loki upstream: {{ env "NOMAD_UPSTREAM_ADDR_loki" }}
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
scrape_configs:
- job_name: journal
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: systemd-journal
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: unit
- source_labels: ['__journal__hostname']
target_label: hostname
- source_labels: ['__journal__transport']
target_label: journal_transport
# Docker log labels
- source_labels: ['__journal_syslog_identifier']
target_label: syslog_identifier
- source_labels: ['__journal_image_name']
target_label: docker_image_name
- source_labels: ['__journal_container_name']
target_label: docker_container_name
- source_labels: ['__journal_container_id']
target_label: docker_container_id
- source_labels: ['__journal_com_docker_compose_project']
target_label: docker_compose_project
- source_labels: ['__journal_com_docker_compose_service']
target_label: docker_compose_service
EOF
destination = "local/promtail.yml"
}
resources {
cpu = 50
memory = 50
}
}
}
}

View File

@@ -1,209 +0,0 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
# TODO: Add backup task or job
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 3000
upstreams {
destination_name = "prometheus"
local_bind_port = 9090
}
upstreams {
destination_name = "loki"
local_bind_port = 3100
}
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/"
port = "web"
interval = "10s"
timeout = "10s"
}
tags = [
"traefik.enable=true",
]
}
task "grafana-bootstrap" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "mysql:8"
args = [
"/bin/bash",
"-c",
"/usr/bin/mysql --defaults-extra-file=/task/my.cnf < /task/bootstrap.sql",
]
mount {
type = "bind"
source = "local/"
target = "/task/"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
[client]
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
user=root
{{ with secret "kv/data/mysql" }}
password={{ .Data.data.root_password }}
{{ end }}
EOF
destination = "local/my.cnf"
}
template {
data = <<EOF
{{ with secret "kv/data/grafana" -}}
{{ if .Data.data.db_name -}}
CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
{{ else -}}
SELECT 'NOOP';
{{ end -}}
{{ end -}}
EOF
destination = "local/bootstrap.sql"
}
resources {
cpu = 50
memory = 50
}
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:7.3.6"
ports = ["web"]
mount {
type = "bind"
target = "/etc/grafana"
source = "local/config"
}
}
env = {
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel",
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
{{ with secret "kv/data/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_pw }}
GF_SMTP_USER={{ .Data.data.smtp_user }}
GF_SMTP_PASSWORD={{ .Data.data.smtp_password }}
GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .Data.data.minio_access_key }}
GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .Data.data.minio_secret_key }}
GRAFANA_ALERT_EMAIL_ADDRESSES={{ .Data.data.alert_email_addresses }}
{{ if .Data.data.db_name -}}
# Database storage
GF_DATABASE_TYPE=mysql
GF_DATABASE_HOST={{ env "NOMAD_UPSTREAM_ADDR_mysql_server" }}
GF_DATABASE_NAME={{ .Data.data.db_name }}
GF_DATABASE_USER={{ .Data.data.db_user }}
GF_DATABASE_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ end -}}
{{ with secret "kv/data/slack" -}}
SLACK_BOT_URL={{ .Data.data.bot_url }}
SLACK_BOT_TOKEN={{ .Data.data.bot_token }}
SLACK_HOOK_URL={{ .Data.data.hook_url }}
{{ end -}}
EOF
env = true
destination = "secrets/conf.env"
}
%{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~}
template {
data = <<EOF
${file(join("/", [module_path, "grafana", config_file]))}
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "local/config/${config_file}"
# Change template delimiter for dashboard files that use JSON and have double curly braces and square brackets
%{ if length(regexall("dashboard", config_file)) > 0 ~}
left_delimiter = "<<<<"
right_delimiter = ">>>>"
%{ endif ~}
}
%{ endfor ~}
resources {
cpu = 100
memory = 200
}
}
}
}
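The %{ for } and ${ } directives above are Terraform template syntax, so this jobspec has to be rendered (for example with templatefile()) before being submitted to Nomad. A hedged sketch of how the enclosing module might render it; the resource wiring and variable name are assumptions:

# Hypothetical rendering of the jobspec above (not shown in this diff)
resource "nomad_job" "grafana" {
  jobspec = templatefile("${path.module}/grafana.nomad", {
    module_path = path.module # consumed by the fileset() loop in the template
  })
}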

View File

@@ -1,465 +0,0 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
data = /var/lib/grafana
# Directory where grafana can store logs
;logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = /etc/grafana/provisioning
#################################### Server ####################################
[server]
# Protocol (http, https, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
root_url = https://grafana.thefij.rocks
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
log_queries =
#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file
# Provider config options
# memory: does not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions
# Session cookie name
;cookie_name = grafana_sess
# If you use session in https only, default is false
;cookie_secure = false
# Session life time, default is 86400
;session_life_time = 86400
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Background text for the user field on the login page
;login_hint = email or username
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
#################################### Github Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;team_ids =
;allowed_organizations =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = true
;name = Cloudron
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;team_ids =
;allowed_organizations =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Auth Proxy ##########################
[auth.proxy]
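# The block below is rendered by Nomad's template engine (consul-template):
# it enables auth proxy and builds `whitelist` as a comma-separated list of
# Traefik node addresses, appending "addr," for all but the last service
# instance and emitting the final address without a trailing comma.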
{{ with service "traefik" -}}
enabled = true
header_name = X-WEBAUTH-USER
header_property = username
auto_sign_up = true
{{- $last := len . | subtract 1 -}}
{{- $services := . -}}
whitelist = {{ range $i := loop $last -}}
{{- with index $services $i }}{{ .Address }},{{ end -}}
{{- end -}}
{{- with index . $last }}{{ .Address }}{{ end -}}
{{- end }}
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
#################################### SMTP / Emailing ##########################
[smtp]
enabled = true
host = smtp.mailgun.org:587
user = from_env
# If the password contains # or ; you have to wrap it with triple quotes. Ex: """#password;"""
password = from_env
;cert_file =
;key_file =
;skip_verify = false
from_address = grafana@iamthefij.com
from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
[emails]
;welcome_email_on_sign_up = false
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotation (a switch for the following options), default is true
;log_rotate = true
# Max number of lines in a single file, default is 1000000
;max_lines = 1000000
# Segment log daily, default is true
;daily_rotate = true
# Number of days to keep a log file (deleted after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = false
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API URL /metrics
[metrics]
# Disable / Enable internal metrics
enabled = true
# Publish interval
;interval_seconds = 10
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address value (ex: localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address to send traces to Jaeger (ex: localhost:6831)
;address = localhost:6831
# Tag that will always be included when creating new spans. Ex: tag1:value1,tag2:value2
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# Jaeger sampler config param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
#################################### Grafana.com integration ##########################
# URL used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# You can choose between s3, webdav, gcs, azure_blob, and local
provider = s3
[external_image_storage.s3]
endpoint = https://minio.thefij.rocks
bucket = grafana-images
region = us-east-1
path_style_access = true
;path =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =
;path =
[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =
[external_image_storage.local]
# does not require any configuration
[rendering]
server_url = http://renderer:8081/render
callback_url = http://grafana:3000/
concurrent_render_request_limit = 3


@ -1,428 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#d44a3a",
"rgba(237, 129, 40, 0.89)",
"#299c46"
],
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 4,
"x": 0,
"y": 0
},
"id": 2,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"expr": "count(minitor_monitor_up_count)",
"format": "time_series",
"instant": true,
"intervalFactor": 2,
"refId": "A"
}
],
"thresholds": "0,1",
"title": "Total Monitors",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"cacheTimeout": null,
"colorBackground": true,
"colorPrefix": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"decimals": 0,
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 4,
"x": 4,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"expr": "count(minitor_monitor_up_count)-count(minitor_monitor_up_count>=1)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"refId": "A"
}
],
"thresholds": "1,2,3",
"timeFrom": null,
"title": "Total Down Services",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"alert": {
"conditions": [
{
"evaluator": {
"params": [
1
],
"type": "lt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"A",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"executionErrorState": "alerting",
"for": "0m",
"frequency": "60s",
"handler": 1,
"name": "Service Status alert",
"noDataState": "no_data",
"notifications": []
},
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"decimals": 0,
"fill": 1,
"gridPos": {
"h": 7,
"w": 14,
"x": 8,
"y": 0
},
"id": 4,
"legend": {
"alignAsTable": false,
"avg": false,
"current": true,
"max": false,
"min": false,
"rightSide": false,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(minitor_monitor_up_count) by (monitor)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"refId": "A"
}
],
"thresholds": [
{
"colorMode": "critical",
"fill": true,
"line": true,
"op": "lt",
"value": 1
}
],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Service Status",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"columns": [],
"fontSize": "100%",
"gridPos": {
"h": 11,
"w": 22,
"x": 0,
"y": 7
},
"id": 8,
"links": [],
"options": {},
"pageSize": null,
"scroll": true,
"showHeader": true,
"sort": {
"col": 0,
"desc": true
},
"styles": [
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "Status",
"colorMode": "cell",
"colors": [
"#F2495C",
"#F2495C",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 0,
"link": false,
"mappingType": 1,
"pattern": "Value",
"thresholds": [
"0",
"1"
],
"type": "string",
"unit": "short",
"valueMaps": [
{
"text": "Ok",
"value": "1"
},
{
"text": "Not Ok",
"value": "0"
}
]
}
],
"targets": [
{
"expr": "minitor_monitor_up_count",
"format": "time_series",
"instant": true,
"intervalFactor": 1,
"legendFormat": "{{monitor}}",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Monitor status",
"transform": "timeseries_to_rows",
"type": "table"
}
],
"refresh": "30s",
"schemaVersion": 18,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Minitor Monitor",
"uid": "qoE5Hrxiz",
"version": 3
}


@ -1,732 +0,0 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "panel",
"id": "singlestat",
"name": "Singlestat",
"version": ""
},
{
"type": "panel",
"id": "text",
"name": "Text",
"version": ""
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "3.1.0"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"id": null,
"title": "Prometheus Stats",
"tags": [],
"style": "dark",
"timezone": "browser",
"editable": true,
"hideControls": true,
"sharedCrosshair": false,
"rows": [
{
"collapse": false,
"editable": true,
"height": 178,
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"datasource": "$${DS_PROMETHEUS}",
"decimals": 1,
"editable": true,
"error": false,
"format": "s",
"id": 5,
"interval": null,
"links": [],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 3,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"targets": [
{
"expr": "(time() - process_start_time_seconds{job=\"prometheus\"})",
"intervalFactor": 2,
"refId": "A",
"step": 4
}
],
"thresholds": "",
"title": "Uptime",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current",
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"rangeMaps": [
{
"from": "null",
"to": "null",
"text": "N/A"
}
],
"mappingType": 1,
"gauge": {
"show": false,
"minValue": 0,
"maxValue": 100,
"thresholdMarkers": true,
"thresholdLabels": false
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "$${DS_PROMETHEUS}",
"editable": true,
"error": false,
"format": "none",
"id": 6,
"interval": null,
"links": [],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 3,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"targets": [
{
"expr": "prometheus_local_storage_memory_series",
"intervalFactor": 2,
"refId": "A",
"step": 4
}
],
"thresholds": "1,5",
"title": "Local Storage Memory Series",
"type": "singlestat",
"valueFontSize": "70%",
"valueMaps": [],
"valueName": "current",
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"rangeMaps": [
{
"from": "null",
"to": "null",
"text": "N/A"
}
],
"mappingType": 1,
"gauge": {
"show": false,
"minValue": 0,
"maxValue": 100,
"thresholdMarkers": true,
"thresholdLabels": false
}
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"rgba(50, 172, 45, 0.97)",
"rgba(237, 129, 40, 0.89)",
"rgba(245, 54, 54, 0.9)"
],
"datasource": "$${DS_PROMETHEUS}",
"editable": true,
"error": false,
"format": "none",
"id": 7,
"interval": null,
"links": [],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"span": 3,
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"targets": [
{
"expr": "prometheus_local_storage_indexing_queue_length",
"intervalFactor": 2,
"refId": "A",
"step": 4
}
],
"thresholds": "500,4000",
"title": "Interal Storage Queue Length",
"type": "singlestat",
"valueFontSize": "70%",
"valueMaps": [
{
"op": "=",
"text": "Empty",
"value": "0"
}
],
"valueName": "current",
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"rangeMaps": [
{
"from": "null",
"to": "null",
"text": "N/A"
}
],
"mappingType": 1,
"gauge": {
"show": false,
"minValue": 0,
"maxValue": 100,
"thresholdMarkers": true,
"thresholdLabels": false
}
},
{
"content": "<img src=\"http://prometheus.io/assets/prometheus_logo_grey.svg\" alt=\"Prometheus logo\" style=\"height: 40px;\">\n<span style=\"font-family: 'Open Sans', 'Helvetica Neue', Helvetica; font-size: 25px;vertical-align: text-top;color: #bbbfc2;margin-left: 10px;\">Prometheus</span>\n\n<p style=\"margin-top: 10px;\">You're using Prometheus, an open-source systems monitoring and alerting toolkit originally built at SoundCloud. For more information, check out the <a href=\"http://www.grafana.org/\">Grafana</a> and <a href=\"http://prometheus.io/\">Prometheus</a> projects.</p>",
"editable": true,
"error": false,
"id": 9,
"links": [],
"mode": "html",
"span": 3,
"style": {},
"title": "",
"transparent": true,
"type": "text"
}
],
"title": "New row"
},
{
"collapse": false,
"editable": true,
"height": 227,
"panels": [
{
"aliasColors": {
"prometheus": "#C15C17",
"{instance=\"localhost:9090\",job=\"prometheus\"}": "#C15C17"
},
"bars": false,
"datasource": "$${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fill": 1,
"grid": {
"threshold1": null,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2": null,
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"id": 3,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "connected",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 9,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "rate(prometheus_local_storage_ingested_samples_total[5m])",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{job}}",
"metric": "",
"refId": "A",
"step": 2
}
],
"timeFrom": null,
"timeShift": null,
"title": "Samples ingested (rate-5m)",
"tooltip": {
"shared": true,
"value_type": "cumulative",
"ordering": "alphabetical",
"msResolution": false
},
"type": "graph",
"yaxes": [
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
},
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
}
],
"xaxis": {
"show": true
}
},
{
"content": "#### Samples Ingested\nThis graph displays the count of samples ingested by the Prometheus server, as measured over the last 5 minutes, per time series in the range vector. When troubleshooting an issue on IRC or Github, this is often the first stat requested by the Prometheus team. ",
"editable": true,
"error": false,
"id": 8,
"links": [],
"mode": "markdown",
"span": 2.995914043583536,
"style": {},
"title": "",
"transparent": true,
"type": "text"
}
],
"title": "New row"
},
{
"collapse": false,
"editable": true,
"height": "250px",
"panels": [
{
"aliasColors": {
"prometheus": "#F9BA8F",
"{instance=\"localhost:9090\",interval=\"5s\",job=\"prometheus\"}": "#F9BA8F"
},
"bars": false,
"datasource": "$${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fill": 1,
"grid": {
"threshold1": null,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2": null,
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 5,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "rate(prometheus_target_interval_length_seconds_count[5m])",
"intervalFactor": 2,
"legendFormat": "{{job}}",
"refId": "A",
"step": 2
}
],
"timeFrom": null,
"timeShift": null,
"title": "Target Scrapes (last 5m)",
"tooltip": {
"shared": true,
"value_type": "cumulative",
"ordering": "alphabetical",
"msResolution": false
},
"type": "graph",
"yaxes": [
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
},
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
}
],
"xaxis": {
"show": true
}
},
{
"aliasColors": {},
"bars": false,
"datasource": "$${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fill": 1,
"grid": {
"threshold1": null,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2": null,
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"id": 14,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 4,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "prometheus_target_interval_length_seconds{quantile!=\"0.01\", quantile!=\"0.05\"}",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{quantile}} ({{interval}})",
"metric": "",
"refId": "A",
"step": 2
}
],
"timeFrom": null,
"timeShift": null,
"title": "Scrape Duration",
"tooltip": {
"shared": true,
"value_type": "cumulative",
"ordering": "alphabetical",
"msResolution": false
},
"type": "graph",
"yaxes": [
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
},
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
}
],
"xaxis": {
"show": true
}
},
{
"content": "#### Scrapes\nPrometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. Target scrapes will show how frequently targets are scraped, as measured over the last 5 minutes, per time series in the range vector. Scrape Duration will show how long the scrapes are taking, with percentiles available as series. ",
"editable": true,
"error": false,
"id": 11,
"links": [],
"mode": "markdown",
"span": 3,
"style": {},
"title": "",
"transparent": true,
"type": "text"
}
],
"title": "New row"
},
{
"collapse": false,
"editable": true,
"height": "250px",
"panels": [
{
"aliasColors": {},
"bars": false,
"datasource": "$${DS_PROMETHEUS}",
"decimals": null,
"editable": true,
"error": false,
"fill": 1,
"grid": {
"threshold1": null,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2": null,
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"id": 12,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"hideEmpty": true,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "connected",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"span": 9,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "prometheus_evaluator_duration_milliseconds{quantile!=\"0.01\", quantile!=\"0.05\"}",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{quantile}}",
"refId": "A",
"step": 2
}
],
"timeFrom": null,
"timeShift": null,
"title": "Rule Eval Duration",
"tooltip": {
"shared": true,
"value_type": "cumulative",
"ordering": "alphabetical",
"msResolution": false
},
"type": "graph",
"yaxes": [
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "percentunit",
"label": ""
},
{
"show": true,
"min": null,
"max": null,
"logBase": 1,
"format": "short"
}
],
"xaxis": {
"show": true
}
},
{
"content": "#### Rule Evaluation Duration\nThis graph panel plots the duration for all evaluations to execute. The 50th percentile, 90th percentile and 99th percentile are shown as three separate series to help identify outliers that may be skewing the data.",
"editable": true,
"error": false,
"id": 15,
"links": [],
"mode": "markdown",
"span": 3,
"style": {},
"title": "",
"transparent": true,
"type": "text"
}
],
"title": "New row"
}
],
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"now": true,
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"templating": {
"list": []
},
"annotations": {
"list": []
},
"refresh": false,
"schemaVersion": 12,
"version": 0,
"links": [
{
"icon": "info",
"tags": [],
"targetBlank": true,
"title": "Grafana Docs",
"tooltip": "",
"type": "link",
"url": "http://www.grafana.org/docs"
},
{
"icon": "info",
"tags": [],
"targetBlank": true,
"title": "Prometheus Docs",
"type": "link",
"url": "http://prometheus.io/docs/introduction/overview/"
}
],
"gnetId": 2,
"description": "The official, pre-built Prometheus Stats Dashboard."
}


@ -1,680 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Traefik dashboard prometheus",
"editable": true,
"gnetId": 4475,
"graphTooltip": 0,
"id": 8,
"iteration": 1540859046582,
"links": [],
"panels": [
{
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 10,
"title": "$backend stats",
"type": "row"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#d44a3a",
"rgba(237, 129, 40, 0.89)",
"#299c46"
],
"datasource": "Prometheus",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 1
},
"id": 1,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false
},
"tableColumn": "",
"targets": [
{
"expr": "sum(traefik_backend_server_up{backend=~\"$${backend:regex}\"})/count(traefik_config_reloads_total)",
"format": "time_series",
"intervalFactor": 2,
"refId": "A"
}
],
"thresholds": "0,1",
"title": "$backend status",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "OK",
"value": "1"
}
],
"valueName": "current"
},
{
"aliasColors": {},
"breakPoint": "50%",
"cacheTimeout": null,
"combine": {
"label": "Others",
"threshold": 0
},
"datasource": "Prometheus",
"fontSize": "80%",
"format": "short",
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 1
},
"id": 2,
"interval": null,
"legend": {
"percentage": true,
"show": true,
"values": true
},
"legendType": "Right side",
"links": [],
"maxDataPoints": 3,
"nullPointMode": "connected",
"pieType": "pie",
"strokeWidth": 1,
"targets": [
{
"expr": "traefik_backend_requests_total{backend=~\"$${backend:regex}\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"title": "$backend return code",
"type": "grafana-piechart-panel",
"valueName": "current"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "Prometheus",
"format": "ms",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 1
},
"id": 4,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true
},
"tableColumn": "",
"targets": [
{
"expr": "sum(traefik_backend_request_duration_seconds_sum{backend=~\"$${backend:regex}\"}) / sum(traefik_backend_requests_total{backend=~\"$${backend:regex}\"}) * 1000",
"format": "time_series",
"intervalFactor": 2,
"refId": "A"
}
],
"thresholds": "",
"title": "$backend response time",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "avg"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "Prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 8
},
"id": 3,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"max": true,
"min": true,
"rightSide": false,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(traefik_backend_requests_total{backend=~\"$${backend:regex}\"}[5m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "Total requests $backend",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Total requests over 5min $backend",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 12,
"panels": [],
"title": "Global stats",
"type": "row"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "Prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 16
},
"id": 5,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Status code 200 over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": "Prometheus",
"fill": 1,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 16
},
"id": 6,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": true,
"min": true,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
"targets": [
{
"expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ method }} : {{code}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Others status code over 5min",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"breakPoint": "50%",
"cacheTimeout": null,
"combine": {
"label": "Others",
"threshold": 0
},
"datasource": "Prometheus",
"fontSize": "80%",
"format": "short",
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"interval": null,
"legend": {
"show": true,
"values": true
},
"legendType": "Right side",
"links": [],
"maxDataPoints": 3,
"nullPointMode": "connected",
"pieType": "pie",
"strokeWidth": 1,
"targets": [
{
"expr": "sum(rate(traefik_backend_requests_total[5m])) by (backend) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ backend }}",
"refId": "A"
}
],
"title": "Requests by service",
"type": "grafana-piechart-panel",
"valueName": "total"
},
{
"aliasColors": {},
"breakPoint": "50%",
"cacheTimeout": null,
"combine": {
"label": "Others",
"threshold": 0
},
"datasource": "Prometheus",
"fontSize": "80%",
"format": "short",
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"interval": null,
"legend": {
"show": true,
"values": true
},
"legendType": "Right side",
"links": [],
"maxDataPoints": 3,
"nullPointMode": "connected",
"pieType": "pie",
"strokeWidth": 1,
"targets": [
{
"expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{ entrypoint }}",
"refId": "A"
}
],
"title": "Requests by protocol",
"type": "grafana-piechart-panel",
"valueName": "total"
}
],
"schemaVersion": 16,
"style": "dark",
"tags": [
"traefik",
"prometheus"
],
"templating": {
"list": [
{
"allValue": "",
"current": {},
"datasource": "Prometheus",
"hide": 0,
"includeAll": true,
"label": null,
"multi": false,
"name": "backend",
"options": [],
"query": "label_values(traefik_backend_server_up, backend)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {},
"datasource": "Prometheus",
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "entrypoint",
"options": [],
"query": "label_values(traefik_entrypoint_requests_total, entrypoint)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Traefik",
"uid": "qPdAviJmz",
"version": 5
}


@ -1,7 +0,0 @@
apiVersion: 1
providers:
- name: default
folder: 'Provisioned'
type: file
options:
path: /etc/grafana/provisioning/dashboards/default


@ -1,10 +0,0 @@
---
apiVersion: 1
datasources:
- name: Loki
url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}
type: loki
access: proxy
isDefault: false
version: 1


@ -1,10 +0,0 @@
---
apiVersion: 1
datasources:
- name: Prometheus
url: http://{{ env "NOMAD_UPSTREAM_ADDR_prometheus" }}
type: prometheus
access: proxy
isDefault: true
version: 1


@ -1,11 +0,0 @@
---
{{ with secret "kv/data/grafana" -}}
notifiers:
- name: Personal email
type: email
uid: email-1
org_id: 1
is_default: false
settings:
addresses: "{{ .Data.data.alert_email_addresses }}"
{{ end -}}


@ -1,27 +0,0 @@
---
{{ with secret "kv/data/slack" -}}
notifiers:
- name: Slack Bot
type: slack
uid: slack-1
org_id: 1
is_default: false
settings:
url: "{{ .Data.data.bot_url }}"
recipient: "#site-notifications"
username: Grafana Alerts
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
token: "{{ .Data.data.bot_token }}"
uploadImage: true
mentionChannel: channel
- name: Slack Hook
type: slack
uid: slack-2
org_id: 1
is_default: true
settings:
url: "{{ .Data.data.hook_url }}"
icon_url: https://grafana.iamthefij.com/public/img/grafana_icon.svg
uploadImage: true
mentionChannel: channel
{{ end -}}


@ -1,174 +0,0 @@
job "syslogng" {
datacenters = ["dc1"]
type = "service"
group "promtail" {
count = 1
network {
mode = "bridge"
port "main" {
to = 1514
}
port "metrics" {
to = 9080
}
}
service {
name = "syslogng-promtail"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 1514
upstreams {
destination_name = "loki"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:2.2.1"
ports = ["main", "metrics"]
args = ["--config.file=/etc/promtail/promtail.yml"]
mount {
type = "bind"
target = "/etc/promtail/promtail.yml"
source = "local/promtail.yml"
}
}
template {
data = <<EOF
---
server:
http_listen_address: 0.0.0.0
http_listen_port: 9080
clients:
- url: http://{{ env "NOMAD_UPSTREAM_ADDR_loki" }}/loki/api/v1/push
scrape_configs:
# TCP syslog receiver
- job_name: syslog
syslog:
listen_address: 0.0.0.0:{{ env "NOMAD_PORT_main" }}
labels:
job: syslog
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: hostname
EOF
destination = "local/promtail.yml"
}
resources {
cpu = 50
memory = 20
}
}
}
group "syslogng" {
count = 1
network {
mode = "bridge"
port "main" {
to = 514
}
}
service {
name = "syslogng"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 514
upstreams {
destination_name = "syslogng-promtail"
local_bind_port = 1000
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
}
task "syslogng" {
driver = "docker"
config {
image = "balabit/syslog-ng:3.37.1"
ports = ["main"]
args = ["--no-caps"]
mount {
type = "bind"
target = "/etc/syslog-ng/syslog-ng.conf"
source = "local/syslog-ng.conf"
}
}
template {
data = <<EOF
@version: 3.37
@include "scl.conf"
source s_network {
default-network-drivers(
);
};
source s_internal {
internal();
};
destination d_loki {
# Forward to Connect proxy to Promtail
syslog("{{ env "NOMAD_UPSTREAM_IP_syslogng-promtail" }}" transport("tcp") port({{ env "NOMAD_UPSTREAM_PORT_syslogng-promtail" }}));
};
log { source(s_internal); destination(d_loki); };
log { source(s_network); destination(d_loki); };
EOF
destination = "local/syslog-ng.conf"
}
resources {
cpu = 50
memory = 10
}
}
}
}


@ -1,21 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}


@ -1,302 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
job "traefik" {
datacenters = ["dc1"]
type = "service"
priority = 100
constraint {
attribute = "${node.class}"
value = "ingress"
}
constraint {
distinct_hosts = true
}
update {
max_parallel = 1
# canary = 1
# auto_promote = true
auto_revert = true
}
group "traefik" {
count = 1
network {
port "web" {
static = 80
}
port "websecure" {
static = 443
}
port "syslog" {
static = 514
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "traefik"
port = "web"
check {
type = "http"
path = "/ping"
port = "web"
interval = "10s"
timeout = "2s"
}
connect {
native = true
}
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.entryPoints=websecure",
"traefik.http.routers.traefik.service=api@internal",
]
}
task "traefik" {
driver = "docker"
config {
image = "traefik:2.6"
ports = ["web", "websecure"]
network_mode = "host"
mount {
type = "bind"
target = "/etc/traefik"
source = "local/config"
}
mount {
type = "bind"
target = "/etc/traefik/usersfile"
source = "secrets/usersfile"
}
}
vault {
policies = ["access-tables", "nomad-task"]
}
template {
# Avoid conflict with TOML lists [[ ]] and Go templates {{ }}
left_delimiter = "<<"
right_delimiter = ">>"
data = <<EOH
[log]
level = "DEBUG"
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web.http]
[entryPoints.web.http.redirections]
[entryPoints.web.http.redirections.entrypoint]
to = "websecure"
scheme = "https"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure.http.tls]
<< if keyExists "traefik/acme/email" ->>
certResolver = "letsEncrypt"
[[entryPoints.websecure.http.tls.domains]]
main = "*.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>"
<< end ->>
[entryPoints.metrics]
address = ":8989"
[entryPoints.syslogtcp]
address = ":514"
[entryPoints.syslogudp]
address = ":514/udp"
[api]
dashboard = true
[ping]
entrypoint = "web"
[metrics]
[metrics.prometheus]
entrypoint = "metrics"
# manualRouting = true
[providers.file]
directory = "/etc/traefik/conf"
watch = true
[providers.consulCatalog]
connectAware = true
connectByDefault = true
exposedByDefault = false
defaultRule = "Host(`{{normalize .Name}}.<< keyOrDefault "global/base_hostname" "${var.base_hostname}" >>`)"
[providers.consulCatalog.endpoint]
address = "http://<< env "CONSUL_HTTP_ADDR" >>"
<< if keyExists "traefik/acme/email" ->>
[certificatesResolvers.letsEncrypt.acme]
email = "<< key "traefik/acme/email" >>"
# Store in /local because /secrets doesn't persist with ephemeral disk
storage = "/local/acme.json"
[certificatesResolvers.letsEncrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "8.8.8.8:53"]
delayBeforeCheck = 0
<< end ->>
EOH
destination = "local/config/traefik.toml"
}
template {
data = <<EOH
{{ with secret "kv/data/cloudflare" }}
CF_DNS_API_TOKEN={{ .Data.data.api_token_dns_edit }}
CF_ZONE_API_TOKEN={{ .Data.data.api_token_zone_read }}
{{ end }}
EOH
destination = "secrets/cloudflare.env"
env = true
}
template {
data = <<EOH
[http]
[http.routers]
[http.routers.nomad]
entryPoints = ["websecure"]
# middlewares = []
service = "nomad"
rule = "Host(`nomad.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
[http.routers.consul]
entryPoints = ["websecure"]
# middlewares = []
service = "consul"
rule = "Host(`consul.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
[http.routers.vault]
entryPoints = ["websecure"]
# middlewares = []
service = "vault"
rule = "Host(`vault.{{ keyOrDefault "global/base_hostname" "${var.base_hostname}" }}`)"
[http.services]
{{ with service "nomad-client" -}}
[http.services.nomad]
[http.services.nomad.loadBalancer]
{{ range . -}}
[[http.services.nomad.loadBalancer.servers]]
url = "http://{{ .Address }}:{{ .Port }}"
{{ end }}
{{- end }}
{{ with service "consul" -}}
[http.services.consul]
[http.services.consul.loadBalancer]
{{ range . -}}
[[http.services.consul.loadBalancer.servers]]
# Not using .Port because that's an RPC port
url = "http://{{ .Address }}:8500"
{{ end }}
{{- end }}
{{ with service "vault" -}}
[http.services.vault]
[http.services.vault.loadBalancer]
[http.services.vault.loadBalancer.sticky.cookie]
{{ range . -}}
[[http.services.vault.loadBalancer.servers]]
url = "http://{{ .Address }}:{{ .Port }}"
{{ end }}
{{- end }}
EOH
destination = "local/config/conf/route-hashi.toml"
change_mode = "noop"
}
template {
data = <<EOH
{{ with service "syslogng" -}}
[tcp.routers]
[tcp.routers.syslogtcp]
entryPoints = ["syslogtcp"]
service = "syslogngtcp"
rule = "HostSNI(`*`)"
[tcp.services]
[tcp.services.syslogngtcp]
[tcp.services.syslogngtcp.loadBalancer]
{{ range . -}}
[[tcp.services.syslogngtcp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{ end }}
{{ with service "syslogng" -}}
[udp.routers]
[udp.routers.syslogudp]
entryPoints = ["syslogudp"]
service = "syslogngudp"
[udp.services]
[udp.services.syslogngudp]
[udp.services.syslogngudp.loadBalancer]
{{ range . -}}
[[udp.services.syslogngudp.loadBalancer.servers]]
address = "{{ .Address }}:{{ .Port }}"
{{ end -}}
{{ end }}
EOH
destination = "local/config/conf/route-syslog-ng.toml"
change_mode = "noop"
}
template {
data = <<EOH
[http.middlewares]
{{ with secret "kv/data/traefik" }}
{{ if .Data.data.usersfile }}
[http.middlewares.basic-auth.basicAuth]
usersFile = "/etc/traefik/usersfile"
{{ end }}
{{ end }}
EOH
destination = "local/config/conf/middlewares.toml"
change_mode = "noop"
}
template {
data = <<EOH
{{ with secret "kv/data/traefik" }}
{{ .Data.data.usersfile }}
{{ end }}
EOH
destination = "secrets/usersfile"
change_mode = "noop"
}
resources {
cpu = 100
memory = 100
memory_max = 500
}
}
}
}


@ -1,16 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}
resource "nomad_job" "traefik" {
hcl2 {
enabled = true
vars = {
"base_hostname" = "${var.base_hostname}",
}
}
jobspec = file("${path.module}/traefik.nomad")
}


@ -1,5 +0,0 @@
variable "base_hostname" {
type = string
description = "Base hostname to serve content from"
default = "dev.homelab"
}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.1"
hashes = [
"h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=",
"zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed",
"zh:252be544fb4c9daf09cad7d3776daf5fa66b62740d3ea9d6d499a7b1697c3433",
"zh:50985fe02a8e5ae47c75d7c28c911b25d7dc4716cff2ed55ca05889ab77a1f73",
"zh:54cf0ec90538703c66937c77e8d72a38d5af47437eb0b8b55eb5836c5d288878",
"zh:704f536c621337e06fffef6d5f49ac81f52d249f937250527c12884cb83aefed",
"zh:896d8ef6d0b555299f124eb25bce8a17d735da14ef21f07582098d301f47da30",
"zh:976277a85b0a0baafe267cc494f766448d1da5b6936ddcb3ce393bd4d22f08d2",
"zh:c7faa9a2b11bc45833a3e8e340f22f1ecf01597eaeffa7669234b4549d7dfa85",
"zh:caf851ef9c8ce482864badf7058f9278d4537112fa236efd8f1a9315801d9061",
"zh:db203435d58b0ac842540861b3307a623423275d85754c171773f3b210ae5b24",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"zh:f710a37190429045d109edd35de69db3b5f619919c2fa04c77a3a639fea9fd7d",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:iPylWr144mqXvM8NBVMTm+MS6JRhqIihlpJG91GYDyA=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}


@ -25,6 +25,7 @@ job "adminer" {
upstreams {
destination_name = "mysql-server"
# TODO: how do I get these to not bind to the host eth0 address
local_bind_port = 4040
}


@ -1,7 +1,6 @@
job "mysql-server" {
datacenters = ["dc1"]
type = "service"
priority = 80
group "mysql-server" {
count = 1
@ -57,18 +56,6 @@ job "mysql-server" {
task "mysql-server" {
driver = "docker"
config {
image = "mysql:8"
ports = ["db"]
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
volume_mount {
volume = "mysql-data"
destination = "/var/lib/mysql"
@ -76,18 +63,14 @@ job "mysql-server" {
}
env = {
"MYSQL_ROOT_PASSWORD" = "supersecretpassword"
# Allow connections from any host
"MYSQL_ROOT_HOST" = "%"
}
template {
data = <<EOH
{{ with secret "kv/data/mysql" }}
MYSQL_ROOT_PASSWORD={{ .Data.data.root_password }}
{{ end }}
EOH
destination = "secrets/db.env"
env = true
config {
image = "mysql:8"
ports = ["db"]
}
resources {


@ -42,12 +42,6 @@ resource "consul_config_entry" "mysql_intents" {
Precedence = 9
Type = "consul"
},
{
Action = "allow"
Name = "grafana"
Precedence = 9
Type = "consul"
},
]
})
}


@ -1,7 +1,6 @@
job "redis" {
datacenters = ["dc1"]
type = "service"
priority = 60
group "cache" {
count = 1
@ -48,19 +47,18 @@ job "redis" {
# }
}
task "redis" {
task "main" {
driver = "docker"
config {
image = "redis:6"
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--dir", "${NOMAD_ALLOC_DIR}/data"]
args = ["redis-server", "--save", "60", "1", "--loglevel", "warning"]
ports = ["main"]
}
resources {
cpu = 100
memory = 512
memory_max = 1024
memory = 1024
}
}
}


@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
hashes = [
"h1:BKQ5f5ijzeyBSnUr+j0wUi+bYv6KBQVQNDXNRVEcfJE=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.17"
hashes = [
"h1:oWV3VXZhqPZ8Ia07nlIZLeXDBqVULMg9lP3dVMczDCo=",
"zh:146f97eacd9a0c78b357a6cfd2cb12765d4b18e9660a75500ee3e748c6eba41a",
"zh:2eb89a6e5cee9aea03a96ea9f141096fe3baf219b2700ce30229d2d882f5015f",
"zh:3d0f971f79b615c1014c75e2f99f34bd4b4da542ca9f31d5ea7fadc4e9de39c1",
"zh:46099a750c752ce05aa14d663a86478a5ad66d95aff3d69367f1d3628aac7792",
"zh:71e56006b013dcfe1e4e059b2b07148b44fcd79351ae2c357e0d97e27ae0d916",
"zh:74febd25d776688f0558178c2f5a0e6818bbf4cdaa2e160d7049da04103940f0",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:af18c064a5f0dd5422d6771939274841f635b619ab392c73d5bf9720945fdb85",
"zh:c133d7a862079da9f06e301c530eacbd70e9288fa2276ec0704df907270ee328",
"zh:c894cf98d239b9f5a4b7cde9f5c836face0b5b93099048ee817b0380ea439c65",
"zh:c918642870f0cafdbe4d7dd07c909701fc3ddb47cac8357bdcde1327bf78c11d",
"zh:f8f5655099a57b4b9c0018a2d49133771e24c7ff8262efb1ceb140fd224aa9b6",
]
}


@ -1,7 +0,0 @@
# Terraform Levant
This module renders a Levant template and then creates a Nomad job from the rendered result.
It only covers a subset of Levant's capabilities because much of the rest can already be done with Terraform.
Required:
- `template_path`: path to the Levant template to render
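A minimal sketch of calling the module; the module path, template file, and variable values here are illustrative, not taken from this repo:

```hcl
module "levant_job" {
  source = "./levant"  # hypothetical path to this module

  # Required: the Levant template to render
  template_path = "${path.module}/my-job.nomad"

  # Optional: Consul address used for key lookups while rendering
  consul_address = "127.0.0.1:8500"

  # Optional: values passed through as `levant render --var key=value`
  variables = {
    base_hostname = "dev.homelab"
  }
}
```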


@ -1,63 +0,0 @@
#! /usr/bin/env python3
import json
import sys
from subprocess import check_output
from typing import Optional
from typing import overload
from typing import TypeVar
T = TypeVar("T")
@overload
def get_json(d: dict[str, str], key: str, default: None = None) -> None:
...
@overload
def get_json(d: dict[str, str], key: str, default: T = ...) -> T:
...
def get_json(d: dict[str, str], key: str, default: Optional[T] = None) -> Optional[T]:
if key not in d:
return default
return json.loads(d[key])
query = json.load(sys.stdin)
# Required
template_path = query["template_path"]
# Optional
consul_address = query.get("consul_address")
if consul_address is not None:
consul_address = f"-consul-address={consul_address}"
# Need to parse JSON back
variables = [
f'--var={key}={value}' for key, value in get_json(query, "variables", {}).items()
]
variable_files = [
f'--var-file={value}' for value in get_json(query, "var_files", [])
]
args: list[str] = list(
filter(
None,
["levant", "render", consul_address]
+ variables
+ variable_files
+ [template_path],
)
)
# print(" ".join(args), file=sys.stderr)
# exit(1)
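# Render the template with `levant render`; stderr passes through for visibility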
template = check_output(args, stderr=sys.stderr)
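# The external data source protocol expects a JSON object of strings on stdout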
print(json.dumps({"template": template.decode()}))


@ -1,37 +0,0 @@
variable "template_path" {
type = string
nullable = false
}
variable "consul_address" {
type = string
default = null
nullable = true
description = "Consul host and port for making KeyValue lookups"
}
variable "variables" {
type = map(string)
description = "Variables to be passed into nomad-pack with values in JSON form"
default = {}
}
variable "var_files" {
type = list(string)
description = "HCL files containing variables to be used by nomad-pack"
default = []
}
data "external" "levant" {
program = ["${path.module}/levant.py"]
query = {
template_path = var.template_path
consul_address = var.consul_address
variables = jsonencode(var.variables)
var_files = jsonencode(var.var_files)
}
}
resource "nomad_job" "levant" {
jobspec = data.external.levant.result.template
}


@ -1,2 +0,0 @@
job {
}


@ -4,7 +4,6 @@
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",


@ -5,7 +5,6 @@ provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"h1:tAb2gwW+oZ8/t2j7lExdqpNrxmaWsHbyA2crFWClPb0=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
@ -23,7 +22,6 @@ provider "registry.terraform.io/hashicorp/consul" {
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",


@ -0,0 +1,208 @@
job "metrics" {
datacenters = ["dc1"]
type = "system"
group "exporters" {
network {
mode = "bridge"
port "cadvisor" {
host_network = "nomad-bridge"
to = 8080
}
port "node_exporter" {
host_network = "nomad-bridge"
to = 9100
}
port "promtail" {
host_network = "nomad-bridge"
to = 9080
}
port "expose" {
host_network = "nomad-bridge"
}
port "cadvisor_envoy_metrics" {
host_network = "nomad-bridge"
to = 9102
}
}
service {
name = "cadvisor"
port = "cadvisor"
meta {
metrics_addr = "${NOMAD_ADDR_expose}"
envoy_metrics_addr = "${NOMAD_ADDR_cadvisor_envoy_metrics}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 8080
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 8080
listener_port = "expose"
}
}
config {
envoy_prometheus_bind_addr = "0.0.0.0:9102"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/metrics"
port = "cadvisor"
interval = "10s"
timeout = "10s"
}
}
task "cadvisor" {
driver = "docker"
config {
# image = "iamthefij/cadvisor:0.37.5"
image = "gcr.io/cadvisor/cadvisor:v0.39.3"
args = ["--docker_only=true"]
ports = ["cadvisor"]
# volumes = [
# "/:/rootfs:ro",
# "/var/run:/var/run:rw",
# "/sys:/sys:ro",
# "/var/lib/docker/:/var/lib/docker:ro",
# "/cgroup:/cgroup:ro",
# "/etc/machine-id:/etc/machine-id:ro",
# ]
mount {
type = "bind"
source = "/"
target = "/rootfs"
readonly = true
}
mount {
type = "bind"
source = "/var/run"
target = "/var/run"
readonly = false
}
mount {
type = "bind"
source = "/sys"
target = "/sys"
readonly = true
}
mount {
type = "bind"
source = "/var/lib/docker"
target = "/var/lib/docker"
readonly = true
}
# mount {
# type = "bind"
# source = "/cgroup"
# target = "/cgroup"
# readonly = true
# }
mount {
type = "bind"
source = "/etc/machine-id"
target = "/etc/machine-id"
readonly = true
}
}
resources {
cpu = 50
memory = 100
}
}
service {
name = "nodeexporter"
port = "node_exporter"
meta {
metrics_addr = "${NOMAD_ADDR_node_exporter}"
nomad_dc = "${NOMAD_DC}"
nomad_node_name = "${node.unique.name}"
}
connect {
sidecar_service {
proxy {
local_service_port = 9100
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/metrics"
port = "node_exporter"
interval = "10s"
timeout = "10s"
}
}
task "node_exporter" {
driver = "docker"
config {
image = "prom/node-exporter:v1.0.1"
args = ["--path.rootfs", "/host"]
ports = ["node_exporter"]
mount {
type = "bind"
source = "/"
target = "/host"
readonly = true
}
}
resources {
cpu = 50
memory = 50
}
}
}
}
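
The metrics_addr meta advertised above is what the Prometheus job later in this diff rewrites into scrape targets. A hedged sketch of that consumption, as a fragment of the prometheus.yml heredoc; the __meta_consul_service_metadata_* label names are standard Prometheus Consul SD, while the exact rule used in prometheus.nomad is truncated in this diff:

- job_name: "exporters"
  metrics_path: "/metrics"
  consul_sd_configs:
    - server: "${var.consul_address}"
  relabel_configs:
    # Rewrite each target to the address the service advertises in its
    # Consul meta (the metrics_addr values set by the jobs above)
    - source_labels: [__meta_consul_service_metadata_metrics_addr]
      regex: (.+)
      target_label: __address__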

nomad/metrics/grafana.nomad Normal file
View File

@ -0,0 +1,126 @@
job "grafana" {
datacenters = ["dc1"]
group "grafana" {
count = 1
network {
mode = "bridge"
port "web" {
host_network = "loopback"
to = 3000
}
}
ephemeral_disk {
migrate = true
sticky = true
}
service {
name = "grafana"
port = "web"
connect {
sidecar_service {
proxy {
local_service_port = 3000
upstreams {
destination_name = "prometheus"
local_bind_port = 9090
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {
type = "http"
path = "/"
port = "web"
interval = "10s"
timeout = "10s"
}
tags = [
"traefik.enable=true",
]
}
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:7.3.6"
ports = ["web"]
mount {
type = "bind"
target = "/etc/grafana/grafana.ini"
source = "local/config/grafana.ini"
}
mount {
type = "bind"
target = "/etc/grafana/provisioning"
source = "local/config/provisioning"
}
}
env = {
"GF_SECURITY_ADMIN_PASSWORD" = "password",
"GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel",
}
template {
data = <<EOF
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
data = /var/lib/grafana
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = /etc/grafana/provisioning
[server]
# Protocol (http, https, socket)
protocol = http
http_port = 3000
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "local/config/grafana.ini"
}
template {
data = <<EOF
---
apiVersion: 1
datasources:
- name: Prometheus
url: http://${NOMAD_UPSTREAM_ADDR_prometheus}
type: prometheus
access: proxy
isDefault: true
version: 1
EOF
change_mode = "signal"
change_signal = "SIGHUP"
destination = "local/config/provisioning/datasources/prometheus.yml"
}
resources {
cpu = 100
memory = 200
}
}
}
}
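
GF_SECURITY_ADMIN_PASSWORD is hardcoded in env above. The backup job elsewhere in this diff reads credentials from Vault instead; a minimal sketch of the same pattern applied here, assuming a hypothetical kv/data/grafana secret with an admin_password field:

vault {
  policies = [
    "access-tables",
    "nomad-task",
  ]
}

template {
  data = <<EOF
{{ with secret "kv/data/grafana" -}}
GF_SECURITY_ADMIN_PASSWORD={{ .Data.data.admin_password }}
{{ end -}}
EOF
  destination = "secrets/grafana.env"
  env         = true
}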

View File

@ -1,11 +1,16 @@
resource "nomad_job" "exporters" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/exporters.nomad")
variable "consul_address" {
type = string
description = "address of consul server for dynamic scraping"
}
# resource "nomad_job" "exporters" {
# hcl2 {
# enabled = true
# }
#
# jobspec = file("${path.module}/exporters.nomad")
# }
data "consul_nodes" "all-nodes" {
query_options {
datacenter = "dc1"
@ -15,6 +20,11 @@ data "consul_nodes" "all-nodes" {
resource "nomad_job" "prometheus" {
hcl2 {
enabled = true
vars = {
# "consul_address" = "${var.consul_address}",
# TODO: Should this be a list?
"consul_address" = "http://${data.consul_nodes.all-nodes.nodes[0].address}:8500",
}
}
jobspec = file("${path.module}/prometheus.nomad")
@ -25,9 +35,7 @@ resource "nomad_job" "grafana" {
enabled = true
}
jobspec = templatefile("${path.module}/grafana.nomad", {
module_path = "${path.module}"
})
jobspec = file("${path.module}/grafana.nomad")
}
resource "consul_config_entry" "prometheus_intent" {

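The TODO above asks whether consul_address should be a list. A hedged sketch of one option, assuming the jobspec declared a hypothetical variable "consul_addresses" { type = list(string) }; Nomad parses HCL2 job variables from their string form, so a JSON-encoded list can be passed through:

resource "nomad_job" "prometheus" {
  hcl2 {
    enabled = true
    vars = {
      # Hypothetical list-typed variant of the consul_address wiring above
      "consul_addresses" = jsonencode([
        for node in data.consul_nodes.all-nodes.nodes : "http://${node.address}:8500"
      ])
    }
  }
  jobspec = file("${path.module}/prometheus.nomad")
}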
View File

@ -1,3 +1,9 @@
variable "consul_address" {
type = string
description = "Full address of Consul instance to get catalog from"
default = "http://127.0.0.1:5400"
}
job "prometheus" {
datacenters = ["dc1"]
@ -59,7 +65,7 @@ job "prometheus" {
ports = ["web"]
args = [
"--config.file=/etc/prometheus/config/prometheus.yml",
"--storage.tsdb.path=${NOMAD_ALLOC_DIR}/data/tsdb",
"--storage.tsdb.path=/prometheus",
"--web.listen-address=0.0.0.0:9090",
"--web.console.libraries=/usr/share/prometheus/console_libraries",
"--web.console.templates=/usr/share/prometheus/consoles",
@ -85,13 +91,25 @@ scrape_configs:
- targets:
- 0.0.0.0:9090
- job_name: "nomad_server"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
consul_sd_configs:
- server: "${var.consul_address}"
services:
- "nomad"
tags:
- "http"
- job_name: "nomad_client"
metrics_path: "/v1/metrics"
params:
format:
- "prometheus"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
- server: "${var.consul_address}"
services:
- "nomad-client"
@ -101,7 +119,7 @@ scrape_configs:
format:
- "prometheus"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
- server: "${var.consul_address}"
services:
- "consul"
relabel_configs:
@ -112,7 +130,7 @@ scrape_configs:
- job_name: "exporters"
metrics_path: "/metrics"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
- server: "${var.consul_address}"
relabel_configs:
- source_labels: [__meta_consul_service]
action: drop
@ -134,7 +152,7 @@ scrape_configs:
- job_name: "envoy"
metrics_path: "/metrics"
consul_sd_configs:
- server: "http://{{env "attr.unique.network.ip-address"}}:8500"
- server: "${var.consul_address}"
relabel_configs:
- source_labels: [__meta_consul_service]
action: keep
@ -160,7 +178,7 @@ scrape_configs:
resources {
cpu = 100
memory = 300
memory = 200
}
}
}
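
Two different substitutions meet in the template above: ${var.consul_address} is an HCL2 job variable resolved when the job is parsed at submit time, while the {{env "attr.unique.network.ip-address"}} form this diff removes is rendered later, on the client, by the template runner. A minimal sketch of the distinction, with an assumed default:

variable "consul_address" {
  type = string
  # Assumed default for illustration; the real default above is 127.0.0.1:5400
  default = "http://127.0.0.1:8500"
}

template {
  # ${var.consul_address} is fixed once, at job submit time;
  # a {{ env "..." }} lookup would be re-evaluated on the client at render time.
  data        = <<EOF
consul_sd_configs:
  - server: "${var.consul_address}"
EOF
  destination = "local/config/prometheus.yml"
}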

View File

@ -2,28 +2,26 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.16.2"
version = "2.15.0"
hashes = [
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",

View File

@ -0,0 +1,20 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -5,8 +5,7 @@ job "Nextcloud" {
repo = "/local/repo"
# Read from secret file
# Either options.PasswordFile or using readfile()
# passphrase = "secret phrase"
passwordFile("tmp/passphrase")
passphrase = "secret phrase"
}
task "Create dir for repo" {

View File

@ -90,43 +90,21 @@ job "nextcloud" {
args = [
"/bin/bash",
"-c",
"/usr/bin/mysql --defaults-extra-file=/task/my.cnf < /task/bootstrap.sql",
"/usr/bin/mysql -h${NOMAD_UPSTREAM_IP_mysql_server} -P${NOMAD_UPSTREAM_PORT_mysql_server} -uroot -psupersecretpassword < /bootstrap.sql",
]
mount {
type = "bind"
source = "local/"
target = "/task/"
source = "local/bootstrap.sql"
target = "/bootstrap.sql"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
[client]
host={{ env "NOMAD_UPSTREAM_IP_mysql_server" }}
port={{ env "NOMAD_UPSTREAM_PORT_mysql_server" }}
user=root
{{ with secret "kv/data/mysql" }}
password={{ .Data.data.root_password }}
{{ end }}
EOF
destination = "local/my.cnf"
}
template {
data = <<EOF
{{ with secret "kv/data/nextcloud" }}
CREATE DATABASE IF NOT EXISTS `{{ .Data.data.db_name }}`;
CREATE USER IF NOT EXISTS '{{ .Data.data.db_user }}'@'%' IDENTIFIED BY '{{ .Data.data.db_pass }}';
GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
{{ end }}
CREATE DATABASE IF NOT EXISTS `${var.nextcloud_db}`;
CREATE USER IF NOT EXISTS '${var.nextcloud_user}'@'%' IDENTIFIED BY '${var.nextcloud_pass}';
GRANT ALL ON `${var.nextcloud_db}`.* to '${var.nextcloud_user}'@'%';
EOF
destination = "local/bootstrap.sql"
}
@ -153,25 +131,9 @@ GRANT ALL ON `{{ .Data.data.db_name }}`.* to '{{ .Data.data.db_user }}'@'%';
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
template {
data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
"MYSQL_DATABASE" = "${var.nextcloud_db}"
"MYSQL_USER" = "${var.nextcloud_user}"
"MYSQL_PASSWORD" = "${var.nextcloud_pass}"
}
resources {
@ -214,38 +176,14 @@ MYSQL_PASSWORD={{ .Data.data.db_pass }}
target = "/jobs"
source = "jobs"
}
mount {
type = "bind"
target = "/tmp/passphrase"
source = "secrets/passphrase"
}
}
env = {
"MYSQL_HOST" = "${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "${NOMAD_UPSTREAM_PORT_mysql_server}"
}
vault {
policies = ["access-tables", "nomad-task"]
}
template {
data = "{{ with secret \"kv/data/nextcloud\" }}{{ .Data.data.backup_passphrase }}{{ end }}"
destination = "secrets/passphrase"
}
template {
data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end }}
EOF
destination = "secrets/db.env"
env = true
"MYSQL_DATABASE" = "${var.nextcloud_db}"
"MYSQL_USER" = "${var.nextcloud_user}"
"MYSQL_PASSWORD" = "${var.nextcloud_pass}"
}
template {

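The templates above read secrets from kv/data/mysql and kv/data/nextcloud. A hedged sketch of seeding the Nextcloud entry with the vault_kv_secret_v2 resource from recent versions of the hashicorp/vault provider; field names come from the templates above, values are placeholders only:

resource "vault_kv_secret_v2" "nextcloud" {
  mount = "kv"
  name  = "nextcloud"
  data_json = jsonencode({
    db_name = "nextcloud"    # placeholder values, not real credentials
    db_user = "nextcloud"
    db_pass = "example-only"
  })
}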
nomad/packer/cloud-config Normal file
View File

@ -0,0 +1,85 @@
#cloud-config
hostname: node1
users:
- name: iamthefij
gecos: Ian Fijolek
groups: users, sudoers
# sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqVH0C0Vf5cA+QnUlkYHnJ9hWs6hUiOuoIOS7+8fSlK05Chy8WbLijE8MEA6R4dvkWtWnmx8bJnpwJl/mMHendX86ko879EonHNGSLBvbHeMJTjvSVmH2UdLCMhG4+nhj7WgAC7z7o/EtRohD0BQYFAkGaC7PYWSJMExi0sCStPjqjFdDHXrsrLR0Xho2tcLEsW6jZboj5D0j8fcFN2Yn3c3yiHdS3UqHatP1QwaqVLZnujcJZXpOBZqON45SoWy+N4c0Xm0bNc/cZLU3+cPnHKdwBMsJ17Np0CA9PWuC+/6CR/f1de4+SjcMnYtpNOQ0PMlKo0FH1Iim6a2zu6Ia7 ianfijolek-home
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXmLEpRejmI4+dWXW4bpFBXVNsdeyMQHPh57tFNqewCyuWLs0TQYoMlCVe90GYOLz670aeWQ98otImIOmkhgmhilntTP4fSWonAjdATh9sHfPP9ZdhlxoMlr2rcdkudNY2IZh0OxfBGXiwGwMiB57ZHBntls1xTcztWm95e19Ys7UnjY/ewwhKbNHz4ibhdCR37UjWE5fl0J28Iea1FPWJhOLlO0bo2iMolHQ2r1++eA0qFT1T6irLuXWEfzK10XDcDCxtyloBYSc6s5ICDbYda68eIqIcAhrwXwr1eAGvW+0Q/C0HQvg8nICS09Mz6BSRrHsfVtxF709yW/A9i7dW5LxL2KGQfyWLQs5CiimvSRFe2d932nnl0Yi0j4z4co5nJs5U3XGdZA7b+gN9iumNY91dofhOk46OlUdLoZ3nrVpCFGHsFiOkTpArgneCjiyImjk35WETJWytp68mTwQrepVHT4WwqvLi28sGdV2m+9IeJD/w535+xJ5GcNwZ4CYdFOte8z+2k4sVIT1qmUxB2wfcDKrPRTyUrXw8f1EcdXNB02yAq8RVzetVW5XrR1rJ1Ht+YhkH+553DPOSwqBMyzH6VBf6SPsqzwo3Z0ZuBEdBmLdh1o8Rrxu3+dApETUYBbeXQGkbYwEuhsEUk8+fnl5gp19sZSXgM/yrpk170w== ansible-home-devterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEgJzZmKLLA1D055xAGqyzaCnKo2s9aBeSPT9OY+7bDqHxYE/HUTp5zf3uO7yQ970/fI3XmDHoUE08ZyLOBXECPL0zsSljYx2tDivhEg8VwORfbp2J2cUVcd1EciZX36OuZWmRitqnYSzVglbRoAjGbmwap20vWjvsnc2mD9wiJl4Jj7VZsS8S97Di/Zos2ksLWeh7i8jrOxVXQtb6UGu3d1CLd/XXIkVSMIBSQBHwq5yB+13p1+RdMl7mosflzX6bm2gaB+bMBIXbak1BkQRxTS2FSdMDebJ9NCrj17R0SOJjtUCRfwG5gV2CGqp1E/WcEIfKGmHlaIMYx8B9MmDnZvcmoBAuZ83+xYPdzs6/b68VRMRonw9U+xLNjJTsNvQstyFx8afINABjrPrMdvMDmS6Of7GrXycKH6bZ9H2WZzK0WGFbrjz7WCl4WUTkbhmJlGqyq7USWx33eLgpexn0N0bR4HgsoFNvwsIFLpBMPuYtdsHKJ7Md4N8OPC/5PUj7enn9zr0jJm1SB4c3kyrw3V9dtUpEkZwD5UHMX3BYgZ8Mk/EUf1avbHs+wk2D94GgKKVKphNYUFJFeQ2MmvGIHXZqT5Y+LmolaCMeiQHRN4hR8lWZ3YEW1EPfb8EuEWwDtZBdkTdq3WBD/5dxLVGxKfWqyARd6DQKbrEjyUb2/Q== ansible-host
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNlio04oYMNhFNp1tH142OEPX6s6Foul7xJBEJrxA23 ifij@C02FQ1A8Q05Q
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC+mAHEhjAGuSrK7otwuh0P0cnzpde78XVxy6TnrVIcAxOii4tt66f8L+AkacWxBvST+OXfBR0EPMuIr8ya8KsHVA2f7W+XTcH/s3WwHuNvSIf7M3Pt1bGOVtU53HAxeMkNjjJ0kPK9MRiYeONIcj2wuZUwIywH+zCJZiRuYjWTEcz4h6hlaBfwdBtuC5WcoQYUwyuudT0iYFMvG4VA1fk6rqEtM9DkLuCGgXZJb6kn5v4ZSHo7tTelpEAyooaPX7Z94YQ87/oNdUi5NFQRZ6tZFQPLAm3U67IwS3u+Di84vt/rkDL/nnIVbzN9w+DFl60RHG61sf4rnFj4In7e/ypD/IY3NPjFgKJa1ItzoNlJcx+W5RtvP0TjqaUjkbVeUUzE6enRxRGRnHmW5Rz2VbkeQnywEwhy5EZ8I8/exWuvgP5UrW7byUUcMMIHRYMvTf4yqS3+ycoLxQpRncTg5oAY7gMpwXBoPOGFheiP7A6AAKTbZjgT+0uWxlZhxiERyeudDQyhWIQryDBtXtr0JgK/kudn3w4aFvf9Q+mR3hLepSIprHqBzgYTdIjBcX6yyn1CDZyBpI1DOKC2pApou/Pj1iM7nPRu5vjs/C27JaEbarn3nPYNSflUmn9kwKYP3p+HsKczPHhkphcJRGn8J+8BSJPTYTBmtsIMix2Ale/ObQ== WorkingCopy@IansiPad-28012021
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCl1REER7jgH2G0uSinEA48++6F+OffkyyLuBA6KXg38+i+BmRnwETzyaBYoahD/QD9lrfGicUXUM4Vp4cX/hYvWbXc4VVwYzMBpZ6w3ZW0jzLYlILU9e1iUnMqkKkVHd+rYS/djTEBA3qev6Sn1IBg9t/LSE1+zLn2xH15RusKsCOzL0o/yCiSWtiLipGuywaNt6ZcmiJQmk87HhS68uQIVQr9EoG2gcNJt/1Nd0ykuBWmLZq8QXp40P4TBDCIOWBjHLjaknm1+yroooAHV1oNaPymSMXgXK2HqdvvEjyUc6H1euIcWuU+vORiAxdZDaIQVD8Y4+Slp1RsG1t2ICXY9/htZdulTkUQCGaCSMh3o7tq0RDA/oPiAmEpvzGe7NR5R74lkj6sVIFK/zu+3w8MgaMBQXiG8EmFj7G9UBWW5g53h6sG0nho8z7wJ4m14l2RwNT9d1PyLw6YVbmBAIFyMiI20c9ITbVECCIVrGW1S6pAC4EGvUfx5zlayW+CmZk86Ut9NXWhPGw2Whd2J/J7Q6TFXD/ASd0elTqMn6CeqrALZQnR/LpXmlqfrWI42Qiqh1Mz1IhZhNff2grVpCK2rxYpIsom3Yn+mZn8hZYSQ8BNF3VoQmNK0Og/t3iUekBvQLRk26z0bNLNdWHNz+uofBbEiOyxCwiJF0fQpxk/Yw== ansible-home-ianubuntu
apt:
sources:
hashicorp:
source: deb https://apt.releases.hashicorp.com $RELEASE main
key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF60TuYBEADLS1MP7XrMlRkn1Y54cb2UclUMH8HkIRfBrhk5Leo9kNZc/2QD
LmdQbi3UbZkz0uVkHqbFDgV5lAnukCnxgr9BqnL0GJpO78le7gCCbM5bR4rTJ6Ar
OOtIKf25smGTIpbSwNdj8BOLqiExGFj/9L5X9S5kfq3vtuYt+lmxKkIrEPjSYnFR
TQ2mTL8RM932GJod/5VJ2+6YvrCjtPu5/rW02H1U2ZHiTtX6ZGnIvv/sprKyFRqT
x4Ib+o9XwXof/LuxTMpVwIHSzCYanH5hPc7yRGKzIntBS+dDom+h9smx7FTgpHwt
QRFGLtVoHXqON6nXTLFDkEzxr+fXq/bgB1Kc1TuzvoK601ztQGhhDaEPloKqNWM8
Ho7JU1RpnoWr5jOFTYiPM9uyCtFNsJmD9mt4K8sQQN7T2inR5Us0o510FqePRFeX
wOJUMi1CbeYqVHfKQ5cWYujcK8pv3l1a6dSBmFfcdxtwIoA16JzCrgsCeumTDvKu
hOiTctb28srL/9WwlijUzZy6R2BGBbhP937f2NbMS/rpby7M1WizKeo2tkKVyK+w
SUWSw6EtFJi7kRSkH7rvy/ysU9I2ma88TyvyOgIz1NRRXYsW7+brgwXnuJraOLaB
5aiuhlngKpTPvP9CFib7AW2QOXustMZ7pOUREmxgS4kqxo74CuFws163TwARAQAB
tFFIYXNoaUNvcnAgU2VjdXJpdHkgKEhhc2hpQ29ycCBQYWNrYWdlIFNpZ25pbmcp
IDxzZWN1cml0eStwYWNrYWdpbmdAaGFzaGljb3JwLmNvbT6JAk4EEwEIADgWIQTo
oDLglNjrTqGJ0nDaQYyIoyGfewUCXrRO5gIbAwULCQgHAgYVCgkICwIEFgIDAQIe
AQIXgAAKCRDaQYyIoyGfe6/WD/9dTM/1OSgbvSPpPJOOcn5L1nOKRBJpztr4V0ky
GoCDakIQ/sykbcuHXP79FGLzrM8zQOsbvVp/Z2lsWBnxkT8KWM+8LZxYToRGdZhr
huFPHV9df0vAsZGisu4ejHDneHOTO3KqVotkky34jUSjBL7Q8uwXHY9r+5hb452N
vafN1w0Y1QVhb6JjjwWHR8Rf9qkSIEi6m9o8a1M54yQC2y/Zrs6+4F3zZ4uYfTvz
MyFfj0P5VmAoaowLSRdb2/JTObu0+zpKN+PjZA8BcnOf/pvqmEz83FIfo6zJLScx
TVaAwj5Iz/jS04x7EvBuIP3vpgv1R6r+t0qU/7hpu7Oc0dsxhL+C8BpVY26/2hvX
ozN5eG0ysSwexqwls+bnRgd6KdoHlWFNfbW8RCPKyb/s+tmFqGAY/QmxMkukgnXQ
WvBoa0Gdv2AFVLYup9tEO1zF4zBPh5oQwAXDNudLTHJ4KmyEwWsOQJUjNB4y4a7j
iGgK77T4KKXpo7pVDP8Ur+tmNH/d+/YFjxrfJvWt4ypE5dZmFO/FrUMvIGglOLDt
A+SiQe73IpEebB8PiqNlqJ2NU7artuRxYQVColt+/1puIHwV+h0SnMoUEvYqAtxP
J/N3JaiytWlesPPFWvhU/JGUAld5coEU2gbYtlenV/YmdjilIBu50sMSPGF5/6gv
BAA/DbkCDQRetE7mARAA0OH1pn0vdEfSm1kdqIDP3BXBD0BRHNNgGpyXXRRJFaip
bmpu7jSv3FsvN/NmG3BcLXXLFvwY/eIOr6fxRye+a5FSQEtvBnI1GHNmD5GAVT/H
KiwrT5e3ReR/FQS7hCXWU4OA2bKmSEdkJ952NhyYeyAKbkOBgbnlEhtWOAdMI7ws
peHAlHDqfGVOKXDh+FddCUQj/yZ2rblSzFdcC9gtcJSyHWgOQdVAEesEZ16hcZoj
+6O+6BXOQWOo7EPD7lA9a1qesBkSRcxQn48IVVZ2Qx2P2FtCfF+SFX+HQdqJGl15
qxE5CXTuJCMmCVnWhvcLW405uF/HmMFXdqGobEDiQsFFQrfpPVOi4T90VkW8P81s
uPoAlWht1CppNnmhWlvPQsPK/oSMBBOvOEH1EnWJate8yIkveNbqzrE7Xt3sjF6k
yqXaF+qW8OcDvSH/fgvVd21G10Cm77Z2WaKWvfi221oWj+WrgT8cCYv0AVmaLRMe
dajuYlPRQ8KaZaESza2eXggOMP5LQs/mQgfHfwSRekSbKg/L6ctp+xrZ0DPj4iIl
8+H4DxTILopAFWXA1a+uMVp8mV77gA9PyV3nIkrwgaZQ8MdhoKwvN/+SbvhpdzyF
UekzMP/HOaC6JgAomluwnFCdMDFa3FMCF3QUcIyY556QdoFD7g6033xqV6vL+d8A
EQEAAYkCNgQYAQgAIBYhBOigMuCU2OtOoYnScNpBjIijIZ97BQJetE7mAhsMAAoJ
ENpBjIijIZ97lecP+wTgSqhCz3TlUshR8lVrzECueIg3jh3+lY56am9X4MoZ2DAW
IXKjWKVWO55WPYD15A7+TbDyb4zh55m81LxSpV0CSRN4aPuixosWP4d0l+363D2F
oudz+QyvoK5J2sKFPMfhdTgGsEYVO/Zbhus5oNi0kjUTD9U7jHWPS3ilvk/g2F+k
T68lL9+oooleeT+kcBvbKt487JUOwMrkmHqNZdh8qmvMASAuqBcEcqjz96kVEMJY
bhn2skexKfIncoo/btixzJUbnplpDfibFxUHhvWWdwIv4kl3YnrCKKGSDoJcG1mV
sQegK4jWVGrqY8MnCI48iotP18ZxyqOycsZvs2jNmFlKwD9s1mrlr97HZ1MYbLWr
Hq06owH0AzVRM7tzMK7EuHkFLcoa8qh3oijn8O0B7xNOKpTZ2DjajQ/1w8nqmMi5
Z3Wie6ivKng/7p6c6HDrKjoQYc0/fuh1YnL60JG2Arn1OwdBsLDlzPL+Ro5iNwoJ
hZ+stxoZT48iAIWonBsLU11Y+MSwWdN1Eh411HTTunrEs6SafMEhnPi7vvUIZhny
Es0qOM/IUR1I0VtsurSn8aA6Y2Bp73+HuqFLx13/tPKBIUo6D7n/ywUlDCo7wtCw
aSgXPw6uF+0CyLOQ0haf2j6w1OB8ayEGSkTPER5rImCJf3MGw8IECGrErAd+
=EMKC
-----END PGP PUBLIC KEY BLOCK-----
packages:
- python3
- python3-pip
- git
- nomad
- consul
- vault
# vim: set ft=yaml.cloudinit :

View File

@ -0,0 +1,35 @@
packer {
required_plugins {
docker = {
version = ">= 0.0.7"
source = "github.com/hashicorp/docker"
}
}
}
source "qemu" "focal-arm64" {
qemu_binary = "qemu-system-aarch64"
# machine_type = "raspi3b"
machine_type = "virt"
headless = true
# Can't use boot_command when this is true
# disable_vnc = true
iso_url = "https://cloud-images.ubuntu.com/releases/focal/release-20220308/ubuntu-20.04-server-cloudimg-arm64.img"
iso_checksum = "sha256:e905900cd0a0d716a72f83dc94a6e2260275dc0e867c84196a8d6d1bc783b304"
output_directory = "focal_arm64"
shutdown_command = "echo 'packer' | sudo -S shutdown -P now"
disk_size = "5000M"
format = "raw"
ssh_username = "root"
ssh_password = "s0m3password"
ssh_timeout = "20m"
boot_wait = "10s"
# boot_command = []
}
build {
sources = ["source.qemu.focal-arm64"]
}
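
Two loose ends in this file, sketched below under stated assumptions: the packer block pins the docker plugin while the only source is qemu, so a qemu pin is likely what is wanted; and the cloud-config file added above is not referenced here, so one plausible wiring is a NoCloud seed via the QEMU builder's cd_label/cd_content options (cloud-init's NoCloud datasource expects user-data and meta-data on a volume labeled cidata):

packer {
  required_plugins {
    qemu = {
      version = ">= 1.0.0"
      source  = "github.com/hashicorp/qemu"
    }
  }
}

source "qemu" "focal-arm64" {
  # ...same options as above...
  cd_label = "cidata"
  cd_content = {
    "user-data" = file("cloud-config")
    "meta-data" = ""
  }
}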

View File

@ -24,23 +24,15 @@ locals {
vault_node_address = "http://${local.vault_node.node_address}:${local.vault_node.port}"
}
# Configure the Vault provider
provider "vault" {
address = length(var.vault_address) == 0 ? local.vault_node_address : var.vault_address
token = var.vault_token
}
# Something that should exist in a post-bootstrap module; right now this module includes bootstrapping,
# which requires Admin
# data "vault_nomad_access_token" "deploy" {
# backend = "nomad"
# role = "deploy"
# }
# Configure the Nomad provider
provider "nomad" {
address = length(var.nomad_address) == 0 ? local.nomad_node_address : var.nomad_address
address = local.nomad_node_address
secret_id = var.nomad_secret_id
# secret_id = length(var.nomad_secret_id) == 0 ? data.vault_nomad_access_token.admin.secret_id : var.nomad_secret_id
region = "global"
region = "global"
}
# Configure the Vault provider
provider "vault" {
address = local.vault_node_address
token = var.vault_token
}

View File

@ -1,45 +0,0 @@
---
- name: Recovery Consul
hosts: consul_instances
tasks:
- name: Stop Consul
systemd:
name: consul
state: stopped
become: true
- name: Get node-id
slurp:
src: /opt/consul/node-id
register: consul_node_id
become: true
- name: Node Id
debug:
msg: "node_id: {{ consul_node_id.content }}"
- name: Address
debug:
msg: "address: {{ ansible_default_ipv4.address }}"
- name: Save
copy:
dest: "/opt/consul/raft/peers.json"
content: |
[
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
{
"id": "{{ hostvars[host].consul_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:8300",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Consul
systemd:
name: consul
state: restarted
become: true

View File

@ -1,45 +0,0 @@
---
- name: Recovery Nomad
hosts: nomad_instances
tasks:
- name: Stop Nomad
systemd:
name: nomad
state: stopped
become: true
- name: Get node-id
slurp:
src: /var/nomad/server/node-id
register: nomad_node_id
become: true
- name: Node Id
debug:
msg: "node_id: {{ nomad_node_id.content }}"
- name: Address
debug:
msg: "address: {{ ansible_default_ipv4.address }}"
- name: Save
copy:
dest: /var/nomad/server/raft/peers.json
content: |
[
{% for host in ansible_play_hosts|reject('equalto', inventory_hostname) -%}
{
"id": "{{ hostvars[host].nomad_node_id.content }}",
"address": "{{ hostvars[host].ansible_default_ipv4.address }}:4647",
"non_voter": false
}{% if not loop.last %},{% endif %}
{% endfor -%}
]
become: true
- name: Restart Nomad
systemd:
name: nomad
state: restarted
become: true

View File

@ -0,0 +1,38 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.15.0"
hashes = [
"h1:o+Su3YqeOkHgf86GEArIVDZfaZQphYFjAOwpi/b0bzs=",
"zh:0bd2a9873099d89bd52e9eee623dd20ccb275d1e2f750da229a53a4d5b23450c",
"zh:1c9f87d4d97b2c61d006c0bef159d61d2a661a103025f8276ebbeb000129f931",
"zh:25b73a34115255c464be10a53f2510c4a1db958a71be31974d30654d5472e624",
"zh:32fa31329731db2bf4b7d0f09096416ca146f05b58f4482bbd4ee0f28cefbbcc",
"zh:59136b73d3abe7cc5b06d9e12d123ad21298ca86ed49a4060a3cd7c2a28a74a1",
"zh:a191f3210773ca25c543a92f2d392b85e6a053d596293655b1f25b33eb843b4c",
"zh:b8b6033cf0687eadc1099f11d9fb2ca9429ff40c2d85bd6cb047c0f6bc5d5d8d",
"zh:bb7d67ed28aa9b28fc5154161af003383f940b2beda0d4577857cad700f39cd1",
"zh:be615288f59327b975532a1999deab60a022e6819fe80e5a32526155210ecbba",
"zh:de1e3d5c34eef87eb301e74717754babb6dc8e19e3a964919e1165c5a076a719",
"zh:eb8c61b20d8ce2bfff9f735ca8456a0d6368af13aa1f43866f61c70f88cc491c",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.16"
hashes = [
"h1:tyfjD/maKzb0RxxD9KWgLnkJu9lnYziYsQgGw85Giz8=",
"zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
"zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
"zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
"zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
"zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
"zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
"zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
"zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
"zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
"zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
"zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
]
}

View File

@ -1,18 +0,0 @@
---
roles:
- src: https://github.com/IamTheFij/ansible-consul.git
name: ansible-consul
scm: git
version: my-main
- src: https://github.com/IamTheFij/ansible-nomad.git
name: ansible-nomad
scm: git
version: my-main
- src: https://github.com/ansible-community/ansible-vault.git
name: ansible-vault
scm: git
version: master
# - src: maxhoesel.smallstep
# version: 0.4.10
- src: geerlingguy.docker
version: 4.2.2

View File

@ -1,11 +1,6 @@
# Can't run this as part of the root module and as a submodule because of tf state
# module "acls" {
# source = "./acls"
#
# consul_address = var.consul_address
# nomad_secret_id = var.nomad_secret_id
# vault_token = var.vault_token
# }
module "acls" {
source = "./acls"
}
# module "storage_plugins" {
# source = "./storage_plugins"

View File

@ -1,207 +0,0 @@
# Vars
# name = string*
# image = string*
# service_port = int
# ingress = bool
# sticky_disk = bool
# args = json(list[str])
# resources = dict(cpu = int, mem = int)
# env = json(dict(str: any))
# templates = json(list(dict(
# data = str,
# dest = str,
# change_mode = str,
# change_signal = str,
# left_delimiter = str,
# right_delimiter = str,
# )))
# host_volumes = json(list(dict(
# name = str,
# dest = str,
# read_only = bool,
# )))
# healthcheck = "/"
# upstreams = json(list(dict(
# destination_name = str,
# local_bind_port = int
# )))
# mysql = bool
# redis = bool
# vault = bool
job "[[.name]]" {
region = "global"
datacenters = ["dc1"]
type = "service"
group "[[.name]]" {
[[ with .count ]]count = [[ . ]][[ end ]]
network {
mode = "bridge"
[[ if not (empty .service_port) -]]
port "main" {
[[ if default false .ingress -]]
host_network = "loopback"
[[ end -]]
to = [[ .service_port ]]
}
[[ end -]]
}
[[ if default false .sticky_disk ]]
ephemeral_disk {
migrate = true
sticky = true
}
[[ end ]]
[[ with .host_volumes -]]
[[ range $v := . | parseJSON -]]
volume "[[ $v.name ]]" {
type = "host"
read_only = [[ $v.read_only ]]
source = "[[ $v.name ]]"
}
[[ end ]]
[[ end -]]
[[ if not (empty .service_port) ]]
service {
name = "[[.name | replace "_" "-"]]"
port = "main"
[[ if default false .ingress ]]
connect {
sidecar_service {
proxy {
local_service_port = [[ .service_port ]]
[[ if default false .mysql -]]
upstreams {
destination_name = "mysql-server"
local_bind_port = 4040
}
[[ end -]]
[[ if default false .redis -]]
upstreams {
destination_name = "redis"
local_bind_port = 6379
}
[[ end -]]
[[ with .upstreams -]]
[[range $u := . | parseJSON -]]
upstreams {
destination_name = "[[ $u.destination_name ]]"
local_bind_port = [[ $u.local_bind_port ]]
}
[[ end ]]
[[ end -]]
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
[[ end ]]
[[ if not (eq .healthcheck "") -]]
check {
type = "http"
path = "[[ or .healthcheck "/" ]]"
port = "main"
interval = "10s"
timeout = "10s"
}
[[ end -]]
tags = [
[[ if default false .ingress -]]
"traefik.enable=true",
"traefik.http.routers.[[.name]].entryPoints=websecure",
[[ if not (empty .ingress_rule) -]]
"traefik.http.routers.[[.name]].rule=[[.ingress_rule]]",
[[ end -]]
[[ end -]]
]
}
[[ end -]]
task "[[.name]]" {
driver = "docker"
config {
image = "[[.image]]"
[[ if not (empty .service_port) -]]
ports = ["main"]
[[ end -]]
[[ if not (empty .args) -]]
args = ["[[ .args | parseJSON | join `", "` ]]"]
[[ end -]]
[[ with .templates -]]
[[ range $t := . | parseJSON -]]
mount {
type = "bind"
target = "[[ $t.dest ]]"
source = "local/[[ $t.dest ]]"
}
[[ end ]]
[[ end -]]
}
[[ if default false .vault -]]
vault {
policies = [
"access-tables",
"nomad-task",
]
}
[[ end -]]
[[ with .env -]]
env = {
[[ range $k, $v := . | parseJSON -]]
"[[$k]]" = "[[$v]]"
[[ end -]]
}
[[ end -]]
[[ with .host_volumes -]]
[[ range $v := . | parseJSON -]]
volume_mount {
volume = "[[ $v.name ]]"
destination = "[[ $v.dest ]]"
read_only = [[ $v.read_only ]]
}
[[ end ]]
[[ end -]]
[[ with .templates -]]
[[ range $t := . | parseJSON -]]
template {
data = <<EOF
[[ $t.data ]]
EOF
destination = "local/[[ $t.dest ]]"
[[ with $t.left_delimiter ]]left_delimiter = "[[ . ]]"[[ end -]]
[[ with $t.right_delimiter ]]right_delimiter = "[[ . ]]"[[ end -]]
[[ with $t.change_mode ]]change_mode = "[[ . ]]"[[ end -]]
[[ with $t.change_signal ]]change_signal = "[[ . ]]"[[ end -]]
[[ with $t.env ]]env = [[ . ]][[ end ]]
}
[[ end -]]
[[ end -]]
[[ with .resources -]]
resources {
cpu = [[ .cpu ]]
memory = [[ .memory ]]
}
[[ end -]]
}
}
}

View File

@ -1,5 +1,72 @@
module "services" {
source = "./services"
depends_on = [module.databases, module.core]
module "databases" {
source = "./databases"
}
module "blocky" {
source = "./blocky"
base_hostname = var.base_hostname
depends_on = [module.databases]
}
module "traefik" {
source = "./traefik"
consul_address = var.consul_address
base_hostname = var.base_hostname
}
module "metrics" {
source = "./metrics"
consul_address = var.consul_address
}
module "nextcloud" {
source = "./nextcloud"
depends_on = [module.databases]
}
module "backups" {
source = "./backups"
depends_on = [module.databases]
}
module "media" {
source = "./media"
}
resource "nomad_job" "whoami" {
hcl2 {
enabled = true
vars = {
"count" = "${2 * length(data.consul_service.nomad.service)}",
}
}
jobspec = file("${path.module}/whoami.nomad")
}
resource "consul_config_entry" "global_access" {
name = "*"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "traefik"
Precedence = 6
Type = "consul"
},
{
Action = "deny"
Name = "*"
Precedence = 5
Type = "consul"
},
]
})
}

View File

@ -1,40 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/consul" {
version = "2.16.2"
hashes = [
"h1:epldE7sZPBTQHnWEA4WlNJIOVT1UEX+/02SMg5nniaE=",
"zh:0a2e11ca2ba650954951a087a1daec95eee2f3000456b295409a9880c4a10b1a",
"zh:34f6bda06a0d1c213fa8d87d4313687681e67bc8c40c4cbaa7dbe59ce24a4f7e",
"zh:5b85cf93db11ee890f720c317a38158927071feb634855786a0c0cd65825a43c",
"zh:75ef915f3d087e6045751a66fbb7066a852a0944ec8c97200d1134dd84df7ffc",
"zh:8a4a95697bd91ad51a581c12fe50ac61a114afba27895d027f77ac4154a7ea15",
"zh:973d538c8d72793861a1ac9718249a9493f417a2b5096846367560054fd843b9",
"zh:9feb2bdc06fdc2d8370cc9aad9a0c69e7e5ae38aac43f315c3f57507c57be030",
"zh:c5709672d0afecbbe298bf519741ebcb9d04f02a73b5ee0c186dfa241aa5a524",
"zh:c65c60570de6da7190e1e7762577655a463caeb59bc5d38e33034821ed0cbcb9",
"zh:c958d6282650fc472aade61d5df4300936033f43cfb898293ef86aceccdfdf1d",
"zh:cdd3632c81e1d11d3becd193aaa061688840f39147950c45c4301d042743ae6a",
"zh:f3d3efac504c9484a025beb919d22b290aa6dbff256f6e86c1f8ce7817e077e5",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.19"
hashes = [
"h1:EdBny2gaLr/IE+l+6csyCKeIGFMYZ/4tHKpcbS7ArgE=",
"zh:2f3ceeb3318a6304026035b0ac9ee3e52df04913bb9ee78827e58c5398b41254",
"zh:3fbe76c7d957d20dfe3c8c0528b33084651f22a95be9e0452b658e0922916e2a",
"zh:595671a05828cfe6c42ef73aac894ac39f81a52cc662a76f37eb74ebe04ddf75",
"zh:5d76e8788d2af3e60daf8076babf763ec887480bbb9734baccccd8fcddf4f03e",
"zh:676985afeaca6e67b22d60d43fd0ed7055763029ffebc3026089fe2fd3b4a288",
"zh:69152ce6164ac999a640cff962ece45208270e1ac37c10dac484eeea5cf47275",
"zh:6da0b15c05b81f947ec8e139bd81eeeb05c0d36eb5a967b985d0625c60998b40",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:822c0a3bbada5e38099a379db8b2e339526843699627c3be3664cc3b3752bab7",
"zh:af23af2f98a84695b25c8eba7028a81ad4aad63c44aefb79e01bbe2dc82e7f78",
"zh:e36cac9960b7506d92925b667254322520966b9c3feb3ca6102e57a1fb9b1761",
"zh:ffd1e096c1cc35de879c740a91918e9f06b627818a3cb4b1d87b829b54a6985f",
]
}

View File

@ -1,200 +0,0 @@
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
datacenters = ["dc1"]
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
%{ endif ~}
%{ if batch_node == null ~}
constraint {
attribute = "$${node.unique.name}"
operator = "set_contains_any"
# Only deploy to nodes running tasks to backup
value = "n1,n2"
}
%{ else ~}
constraint {
attribute = "$${node.unique.name}"
value = "${batch_node}"
}
%{ endif ~}
group "backup" {
network {
mode = "bridge"
port "metrics" {
to = 8080
}
}
volume "all-volumes" {
type = "host"
read_only = false
source = "all-volumes"
}
service {
name = "backups"
port = "metrics"
# Add connect to mysql
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
local_bind_port = 6060
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
meta {
metrics_addr = "$${NOMAD_ADDR_metrics}"
}
}
task "backup" {
driver = "docker"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = false
}
config {
image = "iamthefij/resticscheduler"
ports = ["metrics"]
args = [
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
%{ endif ~}
"/jobs/node-jobs.hcl",
]
mount {
type = "bind"
target = "/jobs"
source = "jobs"
}
}
vault {
policies = [
"access-tables",
"nomad-task",
]
}
env = {
"MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
"MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
}
template {
# Probably want to use database credentials that have access to dump all tables
data = <<EOF
{{ with secret "kv/data/nextcloud" -}}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ with secret "kv/data/backups" -}}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
{{ end -}}
EOF
destination = "secrets/db.env"
env = true
}
template {
data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
EOH
destination = "local/consul.env"
env = true
}
template {
# Build jobs based on node
data = <<EOF
# Current node is {{ env "node.unique.name" }}
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${file("${module_path}/jobs/consul.hcl")}
{{ end -}}
{{ range service "nextcloud" -}}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nextcloud.hcl")}
{{ end -}}
{{ end -}}
{{ range service "lldap" -}}
# Lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{ end -}}
{{ end -}}
{{ range service "sonarr" -}}
# Sonarr .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/sonarr.hcl")}
{{ end -}}
{{ end -}}
{{ range service "nzbget" -}}
# Nzbget .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nzbget.hcl")}
{{ end -}}
{{ end -}}
EOF
destination = "jobs/node-jobs.hcl"
}
resources {
cpu = 50
memory = 256
}
}
}
}

View File

@ -1,24 +0,0 @@
resource "nomad_job" "backups" {
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = "${path.module}",
batch_node = null,
})
}
# Get Nomad clients from Consul
# data "consul_service" "nomad" {
# name = "nomad-client"
# }
resource "nomad_job" "backups-oneoff" {
# TODO: Get list of nomad hosts dynamically
for_each = toset(["n1", "n2"])
# for_each = toset([
# for node in data.consul_service.nomad.service :
# node.node_name
# ])
jobspec = templatefile("${path.module}/backup.nomad", {
module_path = "${path.module}",
batch_node = each.key,
})
}

View File

@ -1,33 +0,0 @@
job "Consul" {
schedule = "0 * * * *"
config {
repo = "rclone::ftp,env_auth:/nomad/consul"
passphrase = env("BACKUP_PASSPHRASE")
}
task "Use consul snapshots" {
pre_script {
on_backup = "mkdir -p /local/consul"
}
pre_script {
on_backup = "consul snapshot save /local/consul/backup.snap"
}
post_script {
on_restore = "consul snapshot restore /local/consul/backup.snap"
}
}
backup {
paths = ["/local/consul"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,27 +0,0 @@
job "lldap" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/lldap"
passphrase = env("BACKUP_PASSPHRASE")
}
# sqlite "Backup database" {
# path = "/data/lldap/users.db"
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
# dump_to = "/data/lldap/users.db.bak"
# }
backup {
paths = ["/data/lldap"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,21 +0,0 @@
job "nzbget" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/nzbget"
passphrase = env("BACKUP_PASSPHRASE")
}
backup {
paths = ["/data/nzbget"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,27 +0,0 @@
job "sonarr" {
schedule = "@daily"
config {
repo = "rclone::ftp,env_auth:/nomad/sonarr"
passphrase = env("BACKUP_PASSPHRASE")
}
# sqlite "Backup database" {
# path = "/data/lldap/users.db"
# # sqlite3 /data/lldap/users.db .backup /data/lldap/users.db.bak
# dump_to = "/data/lldap/users.db.bak"
# }
backup {
paths = ["/data/sonarr"]
# Because path is absolute
restore_opts {
Target = "/"
}
}
forget {
KeepLast = 2
Prune = true
}
}

View File

@ -1,29 +0,0 @@
resource "consul_service" "homeassistant" {
name = "hass"
node = consul_node.homeassistant.name
port = 8123
tags = [
"traefik.enable=true",
"traefik.consulcatalog.connect=false",
"traefik.http.routers.hass.entryPoints=websecure",
]
check {
check_id = "homeassistant:hass"
status = "passing"
name = "Home Assistant Health Check"
http = "192.168.3.65:8123"
interval = "30s"
timeout = "10s"
}
}
resource "consul_node" "homeassistant" {
name = "homeassistant"
address = "192.168.3.65"
meta = {
"external-node" = "true"
"external-probe" = "true"
}
}

View File

@ -1,201 +0,0 @@
job "ipdvr" {
region = "global"
datacenters = ["dc1"]
type = "service"
group "nzbget" {
network {
mode = "bridge"
port "main" {
host_network = "loopback"
to = 6789
}
}
volume "nzbget-data" {
type = "host"
read_only = false
source = "nzbget-data"
}
volume "download" {
type = "host"
read_only = false
source = "download"
}
service {
name = "nzbget"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 6789
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
# check {
# type = "http"
# path = "/"
# port = "main"
# interval = "10s"
# timeout = "10s"
# }
tags = [
"traefik.enable=true",
"traefik.http.routers.nzbget.entryPoints=websecure",
]
}
task "nzbget" {
driver = "docker"
config {
image = "linuxserver/nzbget"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "nzbget-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "download"
destination = "/downloads"
read_only = false
}
resources {
cpu = 200
memory = 200
memory_max = 500
}
}
}
group "sonarr" {
network {
mode = "bridge"
port "main" {
host_network = "loopback"
to = 8989
}
}
volume "sonarr-data" {
type = "host"
read_only = false
source = "sonarr-data"
}
volume "tv-sonarr" {
type = "host"
read_only = false
source = "tv-sonarr"
}
volume "download" {
type = "host"
read_only = false
source = "download"
}
service {
name = "sonarr"
port = "main"
connect {
sidecar_service {
proxy {
local_service_port = 8989
upstreams {
destination_name = "nzbget"
local_bind_port = 6789
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 20
memory_max = 50
}
}
}
# check {
# type = "http"
# path = "/"
# port = "main"
# interval = "10s"
# timeout = "10s"
# }
tags = [
"traefik.enable=true",
"traefik.http.routers.sonarr.entryPoints=websecure",
]
}
task "sonarr" {
driver = "docker"
config {
image = "linuxserver/sonarr"
ports = ["main"]
}
env = {
"PGID" = 100
"PUID" = 1001
"TZ" = "America/Los_Angeles"
}
volume_mount {
volume = "sonarr-data"
destination = "/config"
read_only = false
}
volume_mount {
volume = "tv-sonarr"
destination = "/tv"
read_only = false
}
volume_mount {
volume = "download"
destination = "/downloads"
read_only = false
}
resources {
cpu = 100
memory = 300
memory_max = 500
}
}
}
}

View File

@ -1,109 +0,0 @@
# module "nextcloud" {
# source = "./nextcloud"
#
# depends_on = [module.databases]
# }
module "backups" {
source = "./backups"
# In parent module
# depends_on = [module.databases]
}
module "media" {
source = "./media"
}
resource "nomad_job" "whoami" {
hcl2 {
enabled = true
vars = {
"count" = 1,
# "count" = "${2 * length(data.consul_service.nomad.service)}",
}
}
jobspec = file("${path.module}/whoami.nomad")
}
resource "nomad_job" "ipdvr" {
jobspec = file("${path.module}/ip-dvr.nomad")
}
resource "consul_config_entry" "nzbget_intents" {
depends_on = [nomad_job.ipdvr]
name = "nzbget"
kind = "service-intentions"
config_json = jsonencode({
Sources = [
{
Action = "allow"
Name = "sonarr"
Precedence = 9
Type = "consul"
},
]
})
}
# module "nzbget" {
# source "./levant"
#
# template_path = "service.nomad"
# variables = {
# name = "nzbget"
# image = "linuxserver/nzbget"
# service_port = 6789
# ingress = true
# env = jsonencode({
# PGID = 100
# PUID = 1001
# TZ = "America/Los_Angeles"
# })
# host_volumes = jsonencode([
# {
# name = "download"
# dest = "/srv/volumes/download"
# read_only = false
# },
# ])
# }
# }
#
# module "sonarr" {
# source = "./levant"
#
# template_path = "service.nomad"
# variables = {
# name = "sonarr"
# image = "linuxserver/sonarr"
# service_port = 8989
# ingress = true
# env = jsonencode({
# PGID = 100
# PUID = 1001
# TZ = "America/Los_Angeles"
#
# })
# host_volumes = jsonencode([
# {
# name = "sonarr-data"
# dest = "/config"
# read_only = false
# },
# {
# name = "tv-sonarr"
# dest = "/srv/volumes/media-write/TV Shows"
# read_only = false
# },
# {
# name = "download"
# dest = "/srv/volumes/download"
# read_only = false
# },
# ])
# }
# }

View File

@ -3,52 +3,36 @@
hosts: consul_instances
any_errors_fatal: true
vars_files:
- consul_values.yml
roles:
- role: ansible-consul
vars:
consul_version: "1.13.3-1"
consul_version: "1.11.3"
consul_install_remotely: true
consul_install_upgrade: true
consul_install_from_repo: true
consul_os_repo_prerequisites: []
consul_node_role: server
consul_bootstrap_expect: true
consul_bootstrap_expect_value: "{{ [(play_hosts | length), 3] | min }}"
consul_user: consul
consul_manage_user: true
consul_group: bin
consul_manage_group: true
consul_architecture_map:
x86_64: amd64
armhfv6: arm
armv7l: arm
# consul_tls_enable: true
consul_connect_enabled: true
consul_ports_grpc: 8502
consul_client_address: "0.0.0.0"
# Autopilot
consul_autopilot_enable: true
consul_autopilot_cleanup_dead_Servers: true
# Enable metrics
consul_config_custom:
telemetry:
prometheus_retention_time: "2h"
# DNS forwarding
consul_dnsmasq_enable: true
consul_dnsmasq_servers:
# TODO: use addresses of other nomad nodes?
# Maybe this can be [] to get the values from dhcp
- 1.1.1.1
- 1.0.0.1
consul_dnsmasq_bind_interfaces: true
consul_dnsmasq_listen_addresses:
# Listen only to loopback interface
- 127.0.0.1
become: true
tasks:
@ -58,28 +42,39 @@
name: consul
become: true
# If DNS is broken after dnsmasq, then we need to set /etc/resolv.conf to something
# pointing to 127.0.0.1 and possibly restart Docker and Nomad
- name: Update resolv.conf
lineinfile:
dest: /etc/resolv.conf
create: true
line: "nameserver 127.0.0.1"
become: true
- name: Add values
block:
- name: Install python-consul
pip:
name: python-consul
extra_args: --index-url https://pypi.org/simple
- name: Add a value to Consul
consul_kv:
host: "{{ inventory_hostname }}"
key: ansible_test
value: Hello from Ansible!
- name: Set hostname
consul_kv:
host: "{{ inventory_hostname }}"
key: global/base_hostname
# TODO: propagate this through via Consul and Nomad templates rather than Terraform
value: dev.homelab
delegate_to: localhost
run_once: true
- name: Setup Vault cluster
hosts: vault_instances
vars_files:
- ./vault_hashi_vault_values.yml
roles:
- name: ansible-vault
vars:
vault_version: 1.12.0-1
# Doesn't support multi-arch installs
vault_install_hashi_repo: true
vault_harden_file_perms: true
vault_bin_path: /usr/bin
vault_harden_file_perms: true
vault_address: 0.0.0.0
vault_backend: consul
@ -93,6 +88,7 @@
status_code: 200, 429, 472, 473, 501, 503
body_format: json
return_content: true
run_once: true
register: vault_status
- name: Initialize Vault
@ -141,28 +137,7 @@
- "-address=http://127.0.0.1:8200/"
- "{{ item }}"
loop: "{{ unseal_keys_hex }}"
when:
- unseal_keys_hex is defined
- vault_status.json["sealed"]
- name: Install Docker
hosts: nomad_instances
become: true
vars:
docker_architecture_map:
x86_64: amd64
armv7l: armhf
aarch64: arm64
docker_apt_arch: "{{ docker_architecture_map[ansible_architecture] }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
tasks:
- name: Remove snapd
package:
name: snapd
state: absent
when: unseal_keys_hex is defined
# Not on Ubuntu 20.04
# - name: Install Podman
@ -201,29 +176,15 @@
state: mounted
fstype: nfs4
- name: Create Media Library RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Multimedia
path: /srv/volumes/media-write
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Download RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Download
path: /srv/volumes/download
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Create Container NAS RW NFS mount
ansible.posix.mount:
src: 192.168.2.10:/Container
path: /srv/volumes/container
opts: proto=tcp,port=2049,rw
state: mounted
fstype: nfs4
- name: Install Docker
hosts: nomad_instances
become: true
vars:
deb_arch: "{% if ansible_architecture == 'x86_64' %}amd64{% elif ansible_architecture == 'armv7l' %}armhf{% endif %}"
docker_apt_arch: "{{ deb_arch }}"
docker_compose_arch: "{{ (ansible_architecture == 'armv7l') | ternary('armv7', ansible_architecture) }}"
roles:
- geerlingguy.docker
- name: Build Nomad cluster
hosts: nomad_instances
@ -235,60 +196,42 @@
- name: motioneye-recordings
path: /srv/volumes/motioneye-recordings
owner: "root"
group: "root"
group: "bin"
mode: "0755"
read_only: false
- name: media-read
path: /srv/volumes/media-write
read_only: true
- name: media-write
path: /srv/volumes/media-write
path: /srv/volumes/media-read
owner: "root"
group: "root"
mode: "0755"
read_only: false
- name: tv-sonarr
path: "/srv/volumes/media-write/TV Shows"
owner: 1001
group: 100
mode: "0755"
read_only: false
- name: download
path: /srv/volumes/download
owner: 1001
group: 100
mode: "0755"
read_only: false
- name: nzbget-data
path: /srv/volumes/container/nzbget/config
read_only: false
- name: gitea-data
path: /srv/volumes/container/gitea
read_only: false
mode: "0777"
read_only: true
- name: all-volumes
path: /srv/volumes
owner: "root"
group: "root"
mode: "0755"
mode: "0777"
read_only: false
roles:
- name: ansible-nomad
vars:
nomad_version: "1.4.1-1"
nomad_version: "1.3.1"
nomad_install_remotely: true
nomad_install_upgrade: true
nomad_allow_purge_config: true
nomad_meta:
# There are issues with v1.23.0 on arm64
connect.sidecar_image: envoyproxy/envoy:v1.23.1
# Where nomad gets installed to
nomad_bin_dir: /usr/bin
nomad_install_from_repo: true
nomad_user: root
nomad_manage_user: true
nomad_group: bin
nomad_manage_group: true
# Properly map install arch
nomad_architecture_map:
x86_64: amd64
armhfv6: arm
armv7l: arm
nomad_bootstrap_expect: "{{ [(play_hosts | length), 3] | min }}"
nomad_raft_protocol: 3
nomad_autopilot: true
nomad_encrypt_enable: true
# nomad_use_consul: true
@ -306,7 +249,6 @@
nomad_docker_dmsetup: false
# nomad_podman_enable: true
# Merge shared host volumes with node volumes
nomad_host_volumes: "{{ shared_host_volumes + (nomad_unique_host_volumes | default([])) }}"
# Customize docker plugin
@ -334,6 +276,9 @@
# Create networks for binding task ports
nomad_host_networks:
# - name: public
# interface: eth0
# reserved_ports: "22"
- name: nomad-bridge
interface: nomad
reserved_ports: "22"
@ -345,19 +290,6 @@
nomad_acl_enabled: true
# Enable vault integration
# HACK: Only talk to local Vault for now because it doesn't have HTTPS
# TODO: Would be really great to have this over https and point to vault.service.consul
# nomad_vault_address: "https://vault.service.consul:8200"
# Right now, each node only talks to its local Vault, so if that node is rebooted and
# that vault is sealed, it will not have access to vault. This is a problem if a node
# must reboot.
nomad_vault_address: "http://127.0.0.1:8200"
# TODO: This fails on first run because the Nomad-Vault integration can't be set up
# until Nomad has started. Could maybe figure out if ACLs have been set up and leave
# these out until the later play, maybe just bootstrap the nomad-cluster role in Vault
# before Nomad is set up
nomad_vault_create_from_role: "nomad-cluster"
# TODO: (security) Probably want to restrict this to a narrower-scoped token
nomad_vault_enabled: "{{ root_token is defined }}"
nomad_vault_token: "{{ root_token | default('') }}"
@ -365,36 +297,25 @@
ui:
enabled: true
consul:
ui_url: "https://{{ ansible_hostname }}:8500/ui"
ui_url: "http://{{ ansible_hostname }}:8500/ui"
vault:
ui_url: "https://{{ ansible_hostname }}:8200/ui"
ui_url: "http://{{ ansible_hostname }}:8200/ui"
consul:
tags:
- "traefik.enable=true"
- "traefik.consulcatalog.connect=true"
- "traefik.http.routers.nomadclient.entrypoints=websecure"
- name: Bootstrap Nomad ACLs and scheduler
hosts: nomad_instances
tasks:
- name: Start Nomad
systemd:
state: started
name: nomad
- name: Nomad API reachable?
uri:
url: "http://127.0.0.1:4646/v1/status/leader"
method: GET
status_code: 200
register: nomad_check_result
retries: 6
until: nomad_check_result is succeeded
delay: 10
changed_when: false
run_once: true
- name: Bootstrap Nomad ACLs
hosts: nomad_instances
tasks:
- name: Bootstrap ACLs
command:
argv:
@ -414,6 +335,16 @@
delegate_to: localhost
run_once: true
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
run_once: true
register: policies
- name: Read secret
command:
argv:
@ -424,38 +355,11 @@
delegate_to: localhost
run_once: true
no_log: true
changed_when: false
register: read_secretid
- name: Enable service scheduler preemption
command:
argv:
- nomad
- operator
- scheduler
- set-config
- -preempt-system-scheduler=true
- -preempt-service-scheduler=true
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Look for policy
command:
argv:
- nomad
- acl
- policy
- list
environment:
NOMAD_TOKEN: "{{ read_secretid.stdout }}"
run_once: true
register: policies
- name: Copy policy
copy:
src: ./acls/nomad-anon-policy.hcl
src: ./acls/nomad-anon-bootstrap.hcl
dest: /tmp/anonymous.policy.hcl
delegate_to: "{{ play_hosts[0] }}"
register: anon_policy
@ -468,7 +372,7 @@
- acl
- policy
- apply
- -description="Anon read only"
- -description="Anon RW"
- anonymous
- /tmp/anonymous.policy.hcl
environment:
@ -476,24 +380,3 @@
when: policies.stdout == "No policies found" or anon_policy.changed
delegate_to: "{{ play_hosts[0] }}"
run_once: true
- name: Set up Nomad backend and roles in Vault
community.general.terraform:
project_path: ./acls
force_init: true
variables:
consul_address: "{{ play_hosts[0] }}:8500"
vault_token: "{{ root_token }}"
nomad_secret_id: "{{ read_secretid.stdout }}"
delegate_to: localhost
run_once: true
notify:
- Restart Nomad
handlers:
- name: Restart Nomad
systemd:
state: restarted
name: nomad
retries: 6
delay: 5

Some files were not shown because too many files have changed in this diff