Update host networks and proxy mapping

IamTheFij 2022-02-17 14:03:42 -08:00
parent 2ac0a3a15a
commit aff7fd0186
8 changed files with 94 additions and 25 deletions

.gitignore

@@ -1 +1,2 @@
 roles/
+venv/


@@ -10,14 +10,19 @@ rm-nomad:
 		--ssh-target-user $(SSH_USER) \
 		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)

-.PHONY: nomad
-nomad:
+.PHONY: nomad-up
+nomad-up:
 	hashi-up nomad install \
 		--ssh-target-addr $(SERVER) \
 		--ssh-target-key $(SSH_KEY) \
 		--ssh-target-user $(SSH_USER) \
 		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
 		--server --client
+	hashi-up nomad start \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)

 .PHONY: rm-consul
 rm-consul:
@@ -27,21 +32,29 @@ rm-consul:
 		--ssh-target-user $(SSH_USER) \
 		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)

-.PHONY: consul
-consul:
+.PHONY: consul-up
+consul-up:
 	hashi-up consul install \
 		--ssh-target-addr $(SERVER) \
+		--advertise-addr $(SERVER) \
+		--client-addr 0.0.0.0 \
+		--http-addr 0.0.0.0 \
 		--ssh-target-key $(SSH_KEY) \
 		--ssh-target-user $(SSH_USER) \
 		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
-		--advertise-addr $(SERVER) \
-		--client-addr 0.0.0.0 \
-		--http-addr 0.0.0.0 \
 		--connect \
 		--server
+	hashi-up consul start \
+		--ssh-target-addr $(SERVER) \
+		--ssh-target-key $(SSH_KEY) \
+		--ssh-target-user $(SSH_USER) \
+		--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)

 .PHONY: cluster
-cluster:
+cluster: consul-up nomad-up
+
+.PHONY: ansible-cluster
+ansible-cluster:
 	ansible-galaxy install -p roles -r roles/requirements.yml
 	ansible-playbook -K -vv -i ansible_hosts -M roles/ ./setup-cluster.yml
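For context, the flags on the new consul-up target map roughly onto the following Consul agent settings (a sketch only; hashi-up renders the actual config file, and the address shown here is illustrative):

    server         = true        # --server
    advertise_addr = "10.0.0.5"  # --advertise-addr $(SERVER); illustrative value
    client_addr    = "0.0.0.0"   # --client-addr / --http-addr: expose the HTTP API beyond loopback
    connect {
      enabled = true             # --connect, required for the Envoy sidecars used by the job specs below
    }

Splitting install from start also lets the cluster target simply chain consul-up and nomad-up.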


@@ -15,28 +15,37 @@ job "adminer" {
     network {
       mode = "bridge"

       port "adminer" {
-        static = 8080
+        host_network = "loopback"
         to = 8080
       }
     }

     service {
+      name = "adminer"
       port = "adminer"

       connect {
         sidecar_service {
           proxy {
+            local_service_port = 8080
             upstreams {
               destination_name = "mysql-server"
               # TODO: how do I get these to not bind to the host eth0 address
               local_bind_port = 4040
             }
             config {
               protocol = "tcp"
             }
           }
         }
+
+        sidecar_task {
+          resources {
+            cpu = 50
+            memory = 25
+          }
+        }
       }

       tags = [
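With this change the adminer port binds only on the host's loopback network, and the Connect upstream makes mysql-server reachable inside the task's network namespace at 127.0.0.1:4040 through the Envoy sidecar. A minimal sketch of how the task stanza (not shown in this hunk) could consume that upstream; the env wiring below is an assumption for illustration, not part of the commit:

    task "adminer" {
      driver = "docker"

      config {
        image = "adminer"  # assumed image name
      }

      env = {
        # Nomad exposes each Connect upstream as NOMAD_UPSTREAM_ADDR_<name>;
        # for destination_name = "mysql-server" this resolves to 127.0.0.1:4040
        ADMINER_DEFAULT_SERVER = "${NOMAD_UPSTREAM_ADDR_mysql_server}"
      }
    }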


@@ -4,7 +4,6 @@ job "mysql-server" {
   group "mysql-server" {
     count = 1

-    # Some affinity to stateful hosts?
     restart {
       attempts = 10
@@ -17,6 +16,7 @@ job "mysql-server" {
       mode = "bridge"

       port "db" {
         static = 3306
+        to = 3306
       }
     }
@@ -34,6 +34,7 @@ job "mysql-server" {
         sidecar_service {}
       }

+      # Can't use a tcp check with bridge network or proxy
       # check {
       #   type = "tcp"
       #   interval = "10s"
@@ -52,6 +53,7 @@ job "mysql-server" {
       env = {
         "MYSQL_ROOT_PASSWORD" = "supersecretpassword"
+        # Allow connections from any host
         "MYSQL_ROOT_HOST" = "%"
       }
@@ -61,7 +63,7 @@ job "mysql-server" {
       }

       resources {
-        cpu = 500
+        cpu = 300
         memory = 1024
       }
     }


@@ -5,7 +5,7 @@ variable "consul_address" {
 }

 provider "consul" {
-  address = "${var.consul_address}"
+  address = var.consul_address
 }

 # Get Nomad client from Consul
@@ -15,14 +15,14 @@ data "consul_service" "read-nomad-cluster" {
 }

 locals {
-  nomad_node = "${data.consul_service.read-nomad-cluster.service[0]}"
+  nomad_node = data.consul_service.read-nomad-cluster.service[0]
   nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
 }

 # Configure the Nomad provider
 provider "nomad" {
   # address = "http://services.thefij:4646"
-  address = "${local.nomad_node_address}"
+  address = local.nomad_node_address
   region = "global"
 }
@@ -64,3 +64,21 @@ resource "nomad_job" "whoami" {
   jobspec = file("${path.module}/whoami.nomad")
 }
+
+# Create the Nextcloud bootstrap job
+resource "nomad_job" "nextcloud-bootstrap" {
+  hcl2 {
+    enabled = true
+  }
+
+  jobspec = file("${path.module}/nextcloud-bootstrap.nomad")
+}
+
+# Create the Nextcloud job
+resource "nomad_job" "nextcloud" {
+  hcl2 {
+    enabled = true
+  }
+
+  jobspec = file("${path.module}/nextcloud.nomad")
+}


@@ -25,7 +25,7 @@
 #         value: Hello from Ansible!
 #       execute_once: true

-- name: Build Consul cluster
+- name: Build Nomad cluster
   hosts: nomad_instances
   any_errors_fatal: true
   become: true
@@ -39,6 +39,21 @@
     nomad_cni_enable: true
     nomad_docker_enable: true
     # nomad_use_consul: true
+    nomad_bind_address: 0.0.0.0
+    nomad_host_networks:
+      # - name: public
+      #   cidr: 192.168.0.0/16
+      - name: private
+        cidr: 10.0.0.0/8
+        reserved_ports: "22"
+      - name: nomad-bridge
+        # cidr: 172.26.64.0/20
+        interface: nomad
+        reserved_ports: "22"
+      - name: loopback
+        interface: lo
+        reserved_ports: "22"
     # TODO: this should probably be based on host
     nomad_host_volumes:
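Assuming the Ansible Nomad role renders nomad_host_networks into host_network blocks of the client stanza, the resulting agent configuration looks roughly like this (a sketch, not the literal rendered file):

    client {
      enabled = true

      host_network "private" {
        cidr           = "10.0.0.0/8"
        reserved_ports = "22"
      }

      host_network "nomad-bridge" {
        interface      = "nomad"
        reserved_ports = "22"
      }

      host_network "loopback" {
        interface      = "lo"
        reserved_ports = "22"
      }
    }

Job specs can then pin a port's host-side binding to one of these networks by name, which is exactly what the adminer and whoami ports in this commit do with host_network = "loopback".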


@@ -63,7 +63,7 @@ job "traefik" {
         "--entryPoints.web.address=:80",
         "--entryPoints.websecure.address=:443",
         # "--entryPoints.websecure.tls=true",
-        # "--entrypoints.web.http.redirections.entryPoint.to=websecure",
+        "--entrypoints.web.http.redirections.entryPoint.to=websecure",
         # "--entryPoints.admin.address=:8080",
         "--accesslog=true",
         "--api=true",
@@ -91,8 +91,8 @@ job "traefik" {
       }

       resources {
-        cpu = 500
-        memory = 100
+        cpu = 50
+        memory = 50
       }
     }
   }


@@ -16,16 +16,27 @@ job "whoami" {
     network {
       mode = "bridge"

       port "web" {
-        # to = 80
+        host_network = "loopback"
+        to = 80
       }
     }

     service {
+      name = "whoami"
       port = "web"

       connect {
-        sidecar_service {}
+        sidecar_service {
+          proxy {
+            local_service_port = 80
+          }
+        }
+
+        sidecar_task {
+          resources {
+            cpu = 50
+            memory = 50
+          }
+        }
       }

       check {