Update ansible to deploy nomad and consul to Pi host

This is broken because the Pi doesn't have the right version of iptables
IamTheFij 2022-02-27 14:49:00 -08:00
parent daa5a14f4e
commit 040b45eab0
7 changed files with 94 additions and 39 deletions

View File

@@ -53,10 +53,14 @@ consul-up:
.PHONY: cluster
cluster: consul-up nomad-up

venv/bin/ansible:
	python3 -m venv venv
	./venv/bin/pip install ansible

.PHONY: ansible-cluster
ansible-cluster:
	ansible-galaxy install -p roles -r roles/requirements.yml
	ansible-playbook -K -vv -i ansible_hosts -M roles/ ./setup-cluster.yml
ansible-cluster: venv/bin/ansible
	./venv/bin/ansible-galaxy install -p roles -r roles/requirements.yml
	./venv/bin/ansible-playbook -K -vv -i ansible_hosts.yml -M roles/ ./setup-cluster.yml

.PHONY: plan
plan:
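The new ansible-cluster target installs roles from roles/requirements.yml before running the playbook; that file is not part of this diff. Below is a minimal sketch of what it might contain for the two roles used later in setup-cluster.yml, assuming the community-maintained roles on GitHub (the sources, and the file contents as a whole, are assumptions, not taken from this commit):

---
# roles/requirements.yml (hypothetical contents; the real file is not shown in this diff)
- src: https://github.com/ansible-community/ansible-consul.git
  name: ansible-consul
  scm: git
- src: https://github.com/ansible-community/ansible-nomad.git
  name: ansible-nomad
  scm: git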

View File

@@ -1,8 +0,0 @@
[servers]
services.thefij
[consul_instances]
services.thefij consul_node_role=bootstrap
[nomad_instances]
services.thefij nomad_node_role=both

nomad/ansible_hosts.yml Normal file (28 lines added)
View File

@@ -0,0 +1,28 @@
---
all:
  children:
    servers:
      hosts:
        services.thefij:
          consul_node_role: bootstrap
          nomad_node_role: both
          nomad_node_class: ingress
          nomad_host_volumes:
            - name: mysql-data
              path: /srv/volumes/mysql-data
              owner: "nomad"
              group: "bin"
              mode: "0755"
              read_only: false
          # consul_auto_encrypt:
          #   enabled: true
          #   dns_san: ["services.thefij"]
          #   ip_san: ["192.168.2.41", "127.0.0.1"]
        # motionpi.thefij: {}
    consul_instances:
      children:
        servers: {}
    nomad_instances:
      children:
        servers: {}
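The commented-out motionpi.thefij entry above marks where the Pi host named in the commit message would be enabled. A minimal sketch of what that host entry could look like once uncommented, assuming a client-only node; the role and class values below are illustrative assumptions, not part of this commit:

        # hypothetical Pi host entry; values are illustrative only
        motionpi.thefij:
          consul_node_role: client
          nomad_node_role: client
          nomad_node_class: pi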

View File

@@ -60,6 +60,9 @@ resource "nomad_job" "traefik" {
resource "nomad_job" "whoami" {
  hcl2 {
    enabled = true
    vars = {
      "count" = "${2 * length(data.consul_service.read-nomad-cluster.service)}"
    }
  }

  jobspec = file("${path.module}/whoami.nomad")

View File

@@ -7,23 +7,35 @@
  roles:
    - name: ansible-consul
      consul_version: "1.11.3"
      consul_install_remotely: true
      consul_install_upgrade: true
      consul_architecture_map:
        x86_64: amd64
        armhfv6: arm
        armv7l: arm
      # consul_tls_enable: true
      consul_connect_enabled: true
      consul_ports_grpc: 8502
      consul_client_address: "0.0.0.0"
      consul_auto_encrypt:
        enabled: true
        dns_san: ["services.thefij"]
        ip_san: ["192.168.2.41", "127.0.0.1"]

  # tasks:
  #   # Limit to consul host
  #   - name: Add a value to Consul
  #     consul_kv:
  #       key: ansible_test
  #       value: Hello from Ansible!
  #     execute_once: true

  tasks:
    - name: Start Consul
      systemd:
        state: started
        name: consul

    - name: Add values
      block:
        - name: Install pip
          pip:
            name: python-consul

        - name: Add a value to Consul
          consul_kv:
            key: ansible_test
            value: Hello from Ansible!
          run_once: true

- name: Build Nomad cluster
  hosts: nomad_instances
@@ -33,20 +45,25 @@
  roles:
    - name: ansible-nomad
      nomad_version: "1.2.6"
      nomad_install_remotely: true
      nomad_install_upgrade: true
      nomad_allow_purge_config: true
      nomad_encrypt_enable: true
      nomad_cni_enable: true
      nomad_docker_enable: true
      # nomad_use_consul: true
      nomad_cni_enable: true
      nomad_cni_version: 1.0.1
      nomad_docker_enable: true
      nomad_docker_dmsetup: false
      nomad_bind_address: 0.0.0.0
      nomad_architecture_map:
        x86_64: amd64
        armhfv6: arm
        armv7l: arm
      nomad_host_networks:
        # - name: public
        #   cidr: 192.168.0.0/16
        - name: private
          cidr: 10.0.0.0/8
          reserved_ports: "22"
        - name: nomad-bridge
          # cidr: 172.26.64.0/20
          interface: nomad
@@ -54,12 +71,8 @@
        - name: loopback
          interface: lo
          reserved_ports: "22"
      # TODO: this should probably be based on host
      nomad_host_volumes:
        - name: mysql-data
          path: /srv/volumes/mysql-data
          owner: "nomad"
          group: "bin"
          mode: "0755"
          read_only: false

  tasks:
    - name: Start Nomad
      systemd:
        state: started
        name: nomad

View File

@@ -16,6 +16,11 @@ job "traefik" {
  type = "service"

  constraint {
    attribute = "${node.class}"
    value = "ingress"
  }

  group "traefik" {
    count = 1

View File

@@ -4,6 +4,11 @@ variable "base_hostname" {
  default = "dev.homelab"
}

variable "count" {
  type = number
  default = 2
}

job "whoami" {
  region = "global"
  datacenters = ["dc1"]
@@ -11,7 +16,12 @@ job "whoami" {
  type = "service"

  group "whoami" {
    count = 2
    count = var.count

    constraint {
      operator = "distinct_hosts"
      value = "true"
    }

    network {
      mode = "bridge"