Update hosts, improve bootstrap, and move a few things around

Author: IamTheFij
Date: 2022-03-12 10:07:52 -08:00
parent 24484ccc7e
commit b2c03f1e60
12 changed files with 411 additions and 119 deletions


@@ -56,10 +56,11 @@ cluster: consul-up nomad-up
 venv/bin/ansible:
 	python3 -m venv venv
 	./venv/bin/pip install ansible
+	./venv/bin/pip install python-consul

 .PHONY: ansible-cluster
 ansible-cluster: venv/bin/ansible
-	./venv/bin/ansible-galaxy install -p ansible_roles -r roles/requirements.yml
+	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
 	./venv/bin/ansible-playbook -K -vv -i ansible_hosts.yml -M ./roles ./setup-cluster.yml

 .PHONY: plan


@@ -3,14 +3,13 @@ all:
   children:
     servers:
       hosts:
-        services.thefij:
-          consul_node_role: bootstrap
+        nomad0.thefij:
+          # consul_node_role: bootstrap
           nomad_node_role: both
-          nomad_node_class: ingress
           nomad_host_volumes:
             - name: mysql-data
               path: /srv/volumes/mysql-data
-              owner: "nomad"
+              owner: "root"
               group: "bin"
               mode: "0755"
               read_only: false
@@ -19,6 +18,9 @@ all:
           # dns_san: ["services.thefij"]
           # ip_san: ["192.168.2.41", "127.0.0.1"]
         # motionpi.thefij: {}
+        nomad1.thefij:
+          nomad_node_class: ingress
+          nomad_node_role: both

     consul_instances:
       children:

nomad/docker-setup.yaml (new file)

@@ -0,0 +1,47 @@
---
- name: Setup Docker on host
  hosts: docker_hosts
  vars:
    user_name: "{{ create_user | default(ansible_user) }}"
    docker_config_path: >-
      {% if ansible_facts.os_family == 'Darwin' -%}
      ~/.docker/daemon.json
      {%- else -%}
      /etc/docker/daemon.json
      {%- endif %}
    ansible_python_interpreter: python3

  roles:
    - docker_install

  tasks:
    - name: Add to Docker group
      user:
        name: "{{ user_name }}"
        groups: docker
        append: true
      become: true

    - name: Use journald logging driver  # noqa 207
      json_merge:
        path: "{{ docker_config_path }}"
        allow_create: true
        format_indent: true
        update_json: {
          "log-driver": "journald",
          "log-opts": {
            "tag": "{{ '{{ .Name }}/{{ .ImageName }}/{{ .ID }}' }}",
            "labels": "com.docker.compose.project,com.docker.compose.service"
          }
        }
      become: true
      notify: Restart Docker daemon

  handlers:
    - name: Restart Docker daemon
      service:
        name: docker
        state: restarted
      become: true
      when: ansible_facts['os_family'] != "Darwin"

nomad/metrics/grafana.nomad (new file)

@@ -0,0 +1,124 @@
job "grafana" {
  datacenters = ["dc1"]

  group "grafana" {
    count = 1

    network {
      mode = "bridge"
      port "web" {
        host_network = "loopback"
        to = 3000
      }
    }

    service {
      port = "web"

      connect {
        sidecar_service {
          proxy {
            local_service_port = 3000
            upstreams {
              destination_name = "prometheus"
              local_bind_port = 9090
            }
          }
        }

        sidecar_task {
          resources {
            cpu = 50
            memory = 50
          }
        }
      }

      check {
        type = "http"
        path = "/"
        port = "web"
        interval = "10s"
        timeout = "10s"
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.grafana.entrypoints=web,websecure",
        "traefik.http.routers.grafana.rule=Host(`grafana.dev.homelab`)",
        "traefik.subdomain=grafana-sub",
        "traefik.http.routers.grafana.tls=true",
      ]
    }

    task "grafana" {
      driver = "docker"

      config {
        image = "grafana/grafana:7.3.6"
        ports = ["web"]

        mount {
          type = "bind"
          target = "/etc/grafana/grafana.ini"
          source = "local/config/grafana.ini"
        }

        mount {
          type = "bind"
          target = "/etc/grafana/provisioning"
          source = "local/config/provisioning"
        }
      }

      env = {
        "GF_SECURITY_ADMIN_PASSWORD" = "password",
        "GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel",
      }

      template {
        data = <<EOF
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
data = /var/lib/grafana
# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = /etc/grafana/provisioning

[server]
# Protocol (http, https, socket)
protocol = http
http_port = 3000
EOF
        change_mode = "signal"
        change_signal = "SIGHUP"
        destination = "local/config/grafana.ini"
      }

      template {
        data = <<EOF
---
apiVersion: 1

datasources:
  - name: Prometheus
    url: http://${NOMAD_UPSTREAM_ADDR_prometheus}
    type: prometheus
    access: proxy
    isDefault: true
    version: 1
EOF
        change_mode = "signal"
        change_signal = "SIGHUP"
        destination = "local/config/provisioning/datasources/prometheus.yml"
      }

      resources {
        cpu = 100
        memory = 200
      }
    }
  }
}


@@ -9,13 +9,13 @@ variable "consul_address" {
   description = "address of consul server for dynamic scraping"
 }

-resource "nomad_job" "exporters" {
-  hcl2 {
-    enabled = true
-  }
-
-  jobspec = file("${path.module}/exporters.nomad")
-}
+# resource "nomad_job" "exporters" {
+#   hcl2 {
+#     enabled = true
+#   }
+#
+#   jobspec = file("${path.module}/exporters.nomad")
+# }

 resource "nomad_job" "prometheus" {
   hcl2 {
@@ -27,20 +27,11 @@ resource "nomad_job" "prometheus" {
   jobspec = file("${path.module}/prometheus.nomad")
 }

-#
-# resource "nomad_job" "prometheus" {
-#   hcl2 {
-#     enabled = true
-#   }
-#
-# }
-#
-# resource "nomad_job" "grafana" {
-#   hcl2 {
-#     enabled = true
-#     vars = {
-#       "base_hostname" = "${var.base_hostname}",
-#     }
-#   }
-#
-# }
+resource "nomad_job" "grafana" {
+  hcl2 {
+    enabled = true
+  }
+
+  jobspec = file("${path.module}/grafana.nomad")
+}


@@ -14,12 +14,13 @@ job "prometheus" {
       mode = "bridge"

       port "web" {
-        host_network = "nomad-bridge"
+        host_network = "loopback"
         to = 9090
       }
     }

     service {
+      name = "prometheus"
       port = "web"

       connect {

nomad/packer/cloud-config (new file)

@@ -0,0 +1,85 @@
#cloud-config
hostname: node1
users:
name: iamthefij
gecos: Ian Fijolek
groups: users, sudoers
# sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqVH0C0Vf5cA+QnUlkYHnJ9hWs6hUiOuoIOS7+8fSlK05Chy8WbLijE8MEA6R4dvkWtWnmx8bJnpwJl/mMHendX86ko879EonHNGSLBvbHeMJTjvSVmH2UdLCMhG4+nhj7WgAC7z7o/EtRohD0BQYFAkGaC7PYWSJMExi0sCStPjqjFdDHXrsrLR0Xho2tcLEsW6jZboj5D0j8fcFN2Yn3c3yiHdS3UqHatP1QwaqVLZnujcJZXpOBZqON45SoWy+N4c0Xm0bNc/cZLU3+cPnHKdwBMsJ17Np0CA9PWuC+/6CR/f1de4+SjcMnYtpNOQ0PMlKo0FH1Iim6a2zu6Ia7 ianfijolek-home
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXmLEpRejmI4+dWXW4bpFBXVNsdeyMQHPh57tFNqewCyuWLs0TQYoMlCVe90GYOLz670aeWQ98otImIOmkhgmhilntTP4fSWonAjdATh9sHfPP9ZdhlxoMlr2rcdkudNY2IZh0OxfBGXiwGwMiB57ZHBntls1xTcztWm95e19Ys7UnjY/ewwhKbNHz4ibhdCR37UjWE5fl0J28Iea1FPWJhOLlO0bo2iMolHQ2r1++eA0qFT1T6irLuXWEfzK10XDcDCxtyloBYSc6s5ICDbYda68eIqIcAhrwXwr1eAGvW+0Q/C0HQvg8nICS09Mz6BSRrHsfVtxF709yW/A9i7dW5LxL2KGQfyWLQs5CiimvSRFe2d932nnl0Yi0j4z4co5nJs5U3XGdZA7b+gN9iumNY91dofhOk46OlUdLoZ3nrVpCFGHsFiOkTpArgneCjiyImjk35WETJWytp68mTwQrepVHT4WwqvLi28sGdV2m+9IeJD/w535+xJ5GcNwZ4CYdFOte8z+2k4sVIT1qmUxB2wfcDKrPRTyUrXw8f1EcdXNB02yAq8RVzetVW5XrR1rJ1Ht+YhkH+553DPOSwqBMyzH6VBf6SPsqzwo3Z0ZuBEdBmLdh1o8Rrxu3+dApETUYBbeXQGkbYwEuhsEUk8+fnl5gp19sZSXgM/yrpk170w== ansible-home-devterm
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEgJzZmKLLA1D055xAGqyzaCnKo2s9aBeSPT9OY+7bDqHxYE/HUTp5zf3uO7yQ970/fI3XmDHoUE08ZyLOBXECPL0zsSljYx2tDivhEg8VwORfbp2J2cUVcd1EciZX36OuZWmRitqnYSzVglbRoAjGbmwap20vWjvsnc2mD9wiJl4Jj7VZsS8S97Di/Zos2ksLWeh7i8jrOxVXQtb6UGu3d1CLd/XXIkVSMIBSQBHwq5yB+13p1+RdMl7mosflzX6bm2gaB+bMBIXbak1BkQRxTS2FSdMDebJ9NCrj17R0SOJjtUCRfwG5gV2CGqp1E/WcEIfKGmHlaIMYx8B9MmDnZvcmoBAuZ83+xYPdzs6/b68VRMRonw9U+xLNjJTsNvQstyFx8afINABjrPrMdvMDmS6Of7GrXycKH6bZ9H2WZzK0WGFbrjz7WCl4WUTkbhmJlGqyq7USWx33eLgpexn0N0bR4HgsoFNvwsIFLpBMPuYtdsHKJ7Md4N8OPC/5PUj7enn9zr0jJm1SB4c3kyrw3V9dtUpEkZwD5UHMX3BYgZ8Mk/EUf1avbHs+wk2D94GgKKVKphNYUFJFeQ2MmvGIHXZqT5Y+LmolaCMeiQHRN4hR8lWZ3YEW1EPfb8EuEWwDtZBdkTdq3WBD/5dxLVGxKfWqyARd6DQKbrEjyUb2/Q== ansible-host
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNlio04oYMNhFNp1tH142OEPX6s6Foul7xJBEJrxA23 ifij@C02FQ1A8Q05Q
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC+mAHEhjAGuSrK7otwuh0P0cnzpde78XVxy6TnrVIcAxOii4tt66f8L+AkacWxBvST+OXfBR0EPMuIr8ya8KsHVA2f7W+XTcH/s3WwHuNvSIf7M3Pt1bGOVtU53HAxeMkNjjJ0kPK9MRiYeONIcj2wuZUwIywH+zCJZiRuYjWTEcz4h6hlaBfwdBtuC5WcoQYUwyuudT0iYFMvG4VA1fk6rqEtM9DkLuCGgXZJb6kn5v4ZSHo7tTelpEAyooaPX7Z94YQ87/oNdUi5NFQRZ6tZFQPLAm3U67IwS3u+Di84vt/rkDL/nnIVbzN9w+DFl60RHG61sf4rnFj4In7e/ypD/IY3NPjFgKJa1ItzoNlJcx+W5RtvP0TjqaUjkbVeUUzE6enRxRGRnHmW5Rz2VbkeQnywEwhy5EZ8I8/exWuvgP5UrW7byUUcMMIHRYMvTf4yqS3+ycoLxQpRncTg5oAY7gMpwXBoPOGFheiP7A6AAKTbZjgT+0uWxlZhxiERyeudDQyhWIQryDBtXtr0JgK/kudn3w4aFvf9Q+mR3hLepSIprHqBzgYTdIjBcX6yyn1CDZyBpI1DOKC2pApou/Pj1iM7nPRu5vjs/C27JaEbarn3nPYNSflUmn9kwKYP3p+HsKczPHhkphcJRGn8J+8BSJPTYTBmtsIMix2Ale/ObQ== WorkingCopy@IansiPad-28012021
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCl1REER7jgH2G0uSinEA48++6F+OffkyyLuBA6KXg38+i+BmRnwETzyaBYoahD/QD9lrfGicUXUM4Vp4cX/hYvWbXc4VVwYzMBpZ6w3ZW0jzLYlILU9e1iUnMqkKkVHd+rYS/djTEBA3qev6Sn1IBg9t/LSE1+zLn2xH15RusKsCOzL0o/yCiSWtiLipGuywaNt6ZcmiJQmk87HhS68uQIVQr9EoG2gcNJt/1Nd0ykuBWmLZq8QXp40P4TBDCIOWBjHLjaknm1+yroooAHV1oNaPymSMXgXK2HqdvvEjyUc6H1euIcWuU+vORiAxdZDaIQVD8Y4+Slp1RsG1t2ICXY9/htZdulTkUQCGaCSMh3o7tq0RDA/oPiAmEpvzGe7NR5R74lkj6sVIFK/zu+3w8MgaMBQXiG8EmFj7G9UBWW5g53h6sG0nho8z7wJ4m14l2RwNT9d1PyLw6YVbmBAIFyMiI20c9ITbVECCIVrGW1S6pAC4EGvUfx5zlayW+CmZk86Ut9NXWhPGw2Whd2J/J7Q6TFXD/ASd0elTqMn6CeqrALZQnR/LpXmlqfrWI42Qiqh1Mz1IhZhNff2grVpCK2rxYpIsom3Yn+mZn8hZYSQ8BNF3VoQmNK0Og/t3iUekBvQLRk26z0bNLNdWHNz+uofBbEiOyxCwiJF0fQpxk/Yw== ansible-home-ianubuntu
apt:
sources:
hashicorp:
source: deb https://apt.releases.hashicorp.com $RELEASE main
key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF60TuYBEADLS1MP7XrMlRkn1Y54cb2UclUMH8HkIRfBrhk5Leo9kNZc/2QD
LmdQbi3UbZkz0uVkHqbFDgV5lAnukCnxgr9BqnL0GJpO78le7gCCbM5bR4rTJ6Ar
OOtIKf25smGTIpbSwNdj8BOLqiExGFj/9L5X9S5kfq3vtuYt+lmxKkIrEPjSYnFR
TQ2mTL8RM932GJod/5VJ2+6YvrCjtPu5/rW02H1U2ZHiTtX6ZGnIvv/sprKyFRqT
x4Ib+o9XwXof/LuxTMpVwIHSzCYanH5hPc7yRGKzIntBS+dDom+h9smx7FTgpHwt
QRFGLtVoHXqON6nXTLFDkEzxr+fXq/bgB1Kc1TuzvoK601ztQGhhDaEPloKqNWM8
Ho7JU1RpnoWr5jOFTYiPM9uyCtFNsJmD9mt4K8sQQN7T2inR5Us0o510FqePRFeX
wOJUMi1CbeYqVHfKQ5cWYujcK8pv3l1a6dSBmFfcdxtwIoA16JzCrgsCeumTDvKu
hOiTctb28srL/9WwlijUzZy6R2BGBbhP937f2NbMS/rpby7M1WizKeo2tkKVyK+w
SUWSw6EtFJi7kRSkH7rvy/ysU9I2ma88TyvyOgIz1NRRXYsW7+brgwXnuJraOLaB
5aiuhlngKpTPvP9CFib7AW2QOXustMZ7pOUREmxgS4kqxo74CuFws163TwARAQAB
tFFIYXNoaUNvcnAgU2VjdXJpdHkgKEhhc2hpQ29ycCBQYWNrYWdlIFNpZ25pbmcp
IDxzZWN1cml0eStwYWNrYWdpbmdAaGFzaGljb3JwLmNvbT6JAk4EEwEIADgWIQTo
oDLglNjrTqGJ0nDaQYyIoyGfewUCXrRO5gIbAwULCQgHAgYVCgkICwIEFgIDAQIe
AQIXgAAKCRDaQYyIoyGfe6/WD/9dTM/1OSgbvSPpPJOOcn5L1nOKRBJpztr4V0ky
GoCDakIQ/sykbcuHXP79FGLzrM8zQOsbvVp/Z2lsWBnxkT8KWM+8LZxYToRGdZhr
huFPHV9df0vAsZGisu4ejHDneHOTO3KqVotkky34jUSjBL7Q8uwXHY9r+5hb452N
vafN1w0Y1QVhb6JjjwWHR8Rf9qkSIEi6m9o8a1M54yQC2y/Zrs6+4F3zZ4uYfTvz
MyFfj0P5VmAoaowLSRdb2/JTObu0+zpKN+PjZA8BcnOf/pvqmEz83FIfo6zJLScx
TVaAwj5Iz/jS04x7EvBuIP3vpgv1R6r+t0qU/7hpu7Oc0dsxhL+C8BpVY26/2hvX
ozN5eG0ysSwexqwls+bnRgd6KdoHlWFNfbW8RCPKyb/s+tmFqGAY/QmxMkukgnXQ
WvBoa0Gdv2AFVLYup9tEO1zF4zBPh5oQwAXDNudLTHJ4KmyEwWsOQJUjNB4y4a7j
iGgK77T4KKXpo7pVDP8Ur+tmNH/d+/YFjxrfJvWt4ypE5dZmFO/FrUMvIGglOLDt
A+SiQe73IpEebB8PiqNlqJ2NU7artuRxYQVColt+/1puIHwV+h0SnMoUEvYqAtxP
J/N3JaiytWlesPPFWvhU/JGUAld5coEU2gbYtlenV/YmdjilIBu50sMSPGF5/6gv
BAA/DbkCDQRetE7mARAA0OH1pn0vdEfSm1kdqIDP3BXBD0BRHNNgGpyXXRRJFaip
bmpu7jSv3FsvN/NmG3BcLXXLFvwY/eIOr6fxRye+a5FSQEtvBnI1GHNmD5GAVT/H
KiwrT5e3ReR/FQS7hCXWU4OA2bKmSEdkJ952NhyYeyAKbkOBgbnlEhtWOAdMI7ws
peHAlHDqfGVOKXDh+FddCUQj/yZ2rblSzFdcC9gtcJSyHWgOQdVAEesEZ16hcZoj
+6O+6BXOQWOo7EPD7lA9a1qesBkSRcxQn48IVVZ2Qx2P2FtCfF+SFX+HQdqJGl15
qxE5CXTuJCMmCVnWhvcLW405uF/HmMFXdqGobEDiQsFFQrfpPVOi4T90VkW8P81s
uPoAlWht1CppNnmhWlvPQsPK/oSMBBOvOEH1EnWJate8yIkveNbqzrE7Xt3sjF6k
yqXaF+qW8OcDvSH/fgvVd21G10Cm77Z2WaKWvfi221oWj+WrgT8cCYv0AVmaLRMe
dajuYlPRQ8KaZaESza2eXggOMP5LQs/mQgfHfwSRekSbKg/L6ctp+xrZ0DPj4iIl
8+H4DxTILopAFWXA1a+uMVp8mV77gA9PyV3nIkrwgaZQ8MdhoKwvN/+SbvhpdzyF
UekzMP/HOaC6JgAomluwnFCdMDFa3FMCF3QUcIyY556QdoFD7g6033xqV6vL+d8A
EQEAAYkCNgQYAQgAIBYhBOigMuCU2OtOoYnScNpBjIijIZ97BQJetE7mAhsMAAoJ
ENpBjIijIZ97lecP+wTgSqhCz3TlUshR8lVrzECueIg3jh3+lY56am9X4MoZ2DAW
IXKjWKVWO55WPYD15A7+TbDyb4zh55m81LxSpV0CSRN4aPuixosWP4d0l+363D2F
oudz+QyvoK5J2sKFPMfhdTgGsEYVO/Zbhus5oNi0kjUTD9U7jHWPS3ilvk/g2F+k
T68lL9+oooleeT+kcBvbKt487JUOwMrkmHqNZdh8qmvMASAuqBcEcqjz96kVEMJY
bhn2skexKfIncoo/btixzJUbnplpDfibFxUHhvWWdwIv4kl3YnrCKKGSDoJcG1mV
sQegK4jWVGrqY8MnCI48iotP18ZxyqOycsZvs2jNmFlKwD9s1mrlr97HZ1MYbLWr
Hq06owH0AzVRM7tzMK7EuHkFLcoa8qh3oijn8O0B7xNOKpTZ2DjajQ/1w8nqmMi5
Z3Wie6ivKng/7p6c6HDrKjoQYc0/fuh1YnL60JG2Arn1OwdBsLDlzPL+Ro5iNwoJ
hZ+stxoZT48iAIWonBsLU11Y+MSwWdN1Eh411HTTunrEs6SafMEhnPi7vvUIZhny
Es0qOM/IUR1I0VtsurSn8aA6Y2Bp73+HuqFLx13/tPKBIUo6D7n/ywUlDCo7wtCw
aSgXPw6uF+0CyLOQ0haf2j6w1OB8ayEGSkTPER5rImCJf3MGw8IECGrErAd+
=EMKC
-----END PGP PUBLIC KEY BLOCK-----
packages:
- python3
- python3-pip
- git
- nomad
- consul
- vault
# vim: set ft=yaml.cloudinit :


@@ -9,19 +9,27 @@ packer {
 source "qemu" "focal-arm64" {
-  iso_url          = "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-arm64.img"
-  iso_checksum     = "sha256:fee6bc4fcce3267b6a03a2449d5b471b7edf6ef990d6761607bd3960e2df4d9d"
+  qemu_binary      = "qemu-system-aarch64"
+  # machine_type = "raspi3b"
+  machine_type     = "virt"
+  headless         = true
+  # Can't use boot command with this true
+  # disable_vnc = true
+  iso_url          = "https://cloud-images.ubuntu.com/releases/focal/release-20220308/ubuntu-20.04-server-cloudimg-arm64.img"
+  iso_checksum     = "sha256:e905900cd0a0d716a72f83dc94a6e2260275dc0e867c84196a8d6d1bc783b304"
   output_directory = "focal_arm64"
   shutdown_command = "echo 'packer' | sudo -S shutdown -P now"
   disk_size        = "5000M"
-  format           = "qcow2"
+  format           = "raw"
   ssh_username     = "root"
   ssh_password     = "s0m3password"
   ssh_timeout      = "20m"
   boot_wait        = "10s"
-  boot_command     = []
+  # boot_command = []
 }

 build {
-  sources = ["source.qemu.example"]
+  sources = ["source.qemu.focal-arm64"]
 }


@@ -1,7 +1,7 @@
 # Configure Consul provider
 variable "consul_address" {
   type    = string
-  default = "http://192.168.2.41:8500"
+  default = "http://nomad0.thefij:8500"
 }

 variable "base_hostname" {
@@ -32,11 +32,11 @@ provider "nomad" {

 # Define services as modules
-# module "mysql-server" {
-#   source = "./mysql"
-#
-#   base_hostname = var.base_hostname
-# }
+module "mysql-server" {
+  source = "./mysql"
+
+  base_hostname = var.base_hostname
+}

 module "blocky" {
   source = "./blocky"
@@ -58,13 +58,13 @@ module "metrics" {
   base_hostname = var.base_hostname
 }

-# module "nextcloud" {
-#   source = "./nextcloud"
-#
-#   base_hostname = var.base_hostname
-#
-#   depends_on = [module.mysql-server]
-# }
+module "nextcloud" {
+  source = "./nextcloud"
+
+  base_hostname = var.base_hostname
+
+  depends_on = [module.mysql-server]
+}

 resource "nomad_job" "whoami" {
   hcl2 {


@@ -2,27 +2,38 @@
 - name: Build Consul cluster
   hosts: consul_instances
   any_errors_fatal: true
+  become: true

   roles:
-    - name: ansible-consul
-      consul_version: "1.11.3"
-      consul_install_remotely: true
-      consul_install_upgrade: true
-      consul_architecture_map:
-        x86_64: amd64
-        armhfv6: arm
-        armv7l: arm
-      # consul_tls_enable: true
-      consul_connect_enabled: true
-      consul_ports_grpc: 8502
-      consul_client_address: "0.0.0.0"
-      # Enable metrics
-      consul_config_custom:
-        telemetry:
-          prometheus_retention_time: "2h"
-      become: true
+    - role: ansible-consul
+      vars:
+        consul_version: "1.11.3"
+        consul_install_remotely: true
+        consul_install_upgrade: true
+
+        consul_node_role: server
+        consul_bootstrap_expect: true
+
+        consul_user: consul
+        consul_manage_user: true
+        consul_group: bin
+        consul_manage_group: true
+
+        consul_architecture_map:
+          x86_64: amd64
+          armhfv6: arm
+          armv7l: arm
+        # consul_tls_enable: true
+        consul_connect_enabled: true
+        consul_ports_grpc: 8502
+        consul_client_address: "0.0.0.0"
+        # Enable metrics
+        consul_config_custom:
+          telemetry:
+            prometheus_retention_time: "2h"

   tasks:
@@ -30,21 +41,35 @@
       systemd:
         state: started
         name: consul
+      become: true

     - name: Add values
+      # TODO: This can be run from localhost by using an address
       block:
-        - name: Install pip
+        - name: Install python-consul
           pip:
             name: python-consul
+            extra_args: --index-url https://pypi.org/simple

         - name: Add a value to Consul
           consul_kv:
+            host: "{{ inventory_hostname }}"
             key: ansible_test
             value: Hello from Ansible!
+          delegate_to: localhost
       run_once: true

+# Not on Ubuntu 20.04
+# - name: Install Podman
+#   hosts: nomad_instances
+#   become: true
+#
+#   tasks:
+#     - name: Install Podman
+#       package:
+#         name: podman
+#         state: present
+
 - name: Build Nomad cluster
   hosts: nomad_instances
   any_errors_fatal: true
@@ -52,59 +77,72 @@
   roles:
     - name: ansible-nomad
-      nomad_version: "1.2.6"
-      nomad_install_remotely: true
-      nomad_install_upgrade: true
-      nomad_allow_purge_config: true
-
-      # Properly map install arch
-      nomad_architecture_map:
-        x86_64: amd64
-        armhfv6: arm
-        armv7l: arm
-
-      nomad_encrypt_enable: true
-      # nomad_use_consul: true
-
-      # Metrics
-      nomad_telemetry: true
-      nomad_telemetry_prometheus_metrics: true
-      nomad_telemetry_publish_allocation_metrics: true
-      nomad_telemetry_publish_node_metrics: true
-
-      # Enable container plugins
-      nomad_cni_enable: true
-      nomad_cni_version: 1.0.1
-      nomad_docker_enable: true
-      nomad_docker_dmsetup: false
-
-      # Customize docker plugin
-      nomad_plugins:
-        docker:
-          config:
-            volumes:
-              enabled: true
-              selinuxlabel: "z"
-            extra_labels:
-              - "job_name"
-              - "job_id"
-              - "task_group_name"
-              - "task_name"
-              - "namespace"
-              - "node_name"
-              - "node_id"
-
-      # Bind nomad
-      nomad_bind_address: 0.0.0.0
-
-      # Create networks for binding task ports
-      nomad_host_networks:
-        - name: nomad-bridge
-          interface: nomad
-          reserved_ports: "22"
-        - name: loopback
-          interface: lo
-          reserved_ports: "22"
+      vars:
+        nomad_version: "1.2.6"
+        nomad_install_remotely: true
+        nomad_install_upgrade: true
+        nomad_allow_purge_config: true
+
+        nomad_user: root
+        nomad_manage_user: true
+        nomad_group: bin
+        nomad_manage_group: true
+
+        # Properly map install arch
+        nomad_architecture_map:
+          x86_64: amd64
+          armhfv6: arm
+          armv7l: arm
+
+        nomad_encrypt_enable: true
+        # nomad_use_consul: true
+
+        # Metrics
+        nomad_telemetry: true
+        nomad_telemetry_prometheus_metrics: true
+        nomad_telemetry_publish_allocation_metrics: true
+        nomad_telemetry_publish_node_metrics: true
+
+        # Enable container plugins
+        nomad_cni_enable: true
+        nomad_cni_version: 1.0.1
+        nomad_docker_enable: true
+        nomad_docker_dmsetup: false
+        # nomad_podman_enable: true
+
+        # Customize docker plugin
+        nomad_plugins:
+          docker:
+            config:
+              volumes:
+                enabled: true
+                selinuxlabel: "z"
+              extra_labels:
+                - "job_name"
+                - "job_id"
+                - "task_group_name"
+                - "task_name"
+                - "namespace"
+                - "node_name"
+                - "node_id"
+
+        # Bind nomad
+        nomad_bind_address: 0.0.0.0
+
+        # Create networks for binding task ports
+        nomad_host_networks:
+          - name: nomad-bridge
+            interface: nomad
+            reserved_ports: "22"
+          - name: loopback
+            interface: lo
+            reserved_ports: "22"
+
+        nomad_config_custom:
+          ui:
+            enabled: true
+          consul:
+            ui_url: "http://{{ ansible_hostname }}:8500/ui"

   tasks:
     - name: Start Nomad


@@ -86,7 +86,7 @@ job "traefik" {
         "--providers.consulcatalog.endpoint.address=${var.consul_address}",
         "--providers.consulcatalog.servicename=traefik",
         "--providers.consulcatalog.prefix=traefik",
-        "--providers.consulcatalog.defaultrule=Host(`{{normalize .Name}}.${var.base_hostname}`)",
+        "--providers.consulcatalog.defaultrule=Host(`{{normalize .Name}}.${var.base_hostname}`) || ({{hasKey .Labels \"subdomain\" }} && Host(`{{get .Labels \"subdomain\"}}.${var.base_hostname}`))",
       ]

       ports = ["web", "websecure"]


@@ -18,11 +18,6 @@ job "whoami" {
   group "whoami" {
     count = var.count

-    constraint {
-      operator = "distinct_hosts"
-      value    = "true"
-    }
-
     network {
       mode = "bridge"
       port "web" {