Update host networks and proxy mapping

This commit is contained in:
IamTheFij 2022-02-17 14:03:42 -08:00
parent 87dfd449c4
commit 9f49777f1b
8 changed files with 94 additions and 25 deletions

1
nomad/.gitignore vendored
View File

@ -1 +1,2 @@
roles/
venv/

View File

@ -10,14 +10,19 @@ rm-nomad:
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: nomad
nomad:
.PHONY: nomad-up
nomad-up:
hashi-up nomad install \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--server --client
hashi-up nomad start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: rm-consul
rm-consul:
@ -27,21 +32,29 @@ rm-consul:
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: consul
consul:
.PHONY: consul-up
consul-up:
hashi-up consul install \
--ssh-target-addr $(SERVER) \
--advertise-addr $(SERVER) \
--client-addr 0.0.0.0 \
--http-addr 0.0.0.0 \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS) \
--advertise-addr $(SERVER) \
--client-addr 0.0.0.0 \
--http-addr 0.0.0.0 \
--connect \
--server
hashi-up consul start \
--ssh-target-addr $(SERVER) \
--ssh-target-key $(SSH_KEY) \
--ssh-target-user $(SSH_USER) \
--ssh-target-sudo-pass $(SSH_TARGET_SUDO_PASS)
.PHONY: cluster
cluster:
cluster: consul-up nomad-up
.PHONY: ansible-cluster
ansible-cluster:
ansible-galaxy install -p roles -r roles/requirements.yml
ansible-playbook -K -vv -i ansible_hosts -M roles/ ./setup-cluster.yml

View File

@ -15,28 +15,37 @@ job "adminer" {
network {
mode = "bridge"
port "adminer" {
static = 8080
host_network = "loopback"
to = 8080
}
}
service {
name = "adminer"
port = "adminer"
connect {
sidecar_service {
proxy {
local_service_port = 8080
upstreams {
destination_name = "mysql-server"
# TODO: how do I get these to not bind to the host eth0 address
local_bind_port = 4040
}
config {
protocol = "tcp"
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 25
}
}
}
tags = [

View File

@ -4,7 +4,6 @@ job "mysql-server" {
group "mysql-server" {
count = 1
# Some affinity to stateful hosts?
restart {
attempts = 10
@ -17,6 +16,7 @@ job "mysql-server" {
mode = "bridge"
port "db" {
static = 3306
to = 3306
}
}
@ -34,6 +34,7 @@ job "mysql-server" {
sidecar_service {}
}
# Can't use a tcp check with bridge network or proxy
# check {
# type = "tcp"
# interval = "10s"
@ -52,6 +53,7 @@ job "mysql-server" {
env = {
"MYSQL_ROOT_PASSWORD" = "supersecretpassword"
# Allow connections from any host
"MYSQL_ROOT_HOST" = "%"
}
@ -61,7 +63,7 @@ job "mysql-server" {
}
resources {
cpu = 500
cpu = 300
memory = 1024
}
}

View File

@ -1,28 +1,28 @@
# Configure Consul provider
variable "consul_address" {
type = string
type = string
default = "http://192.168.2.41:8500"
}
provider "consul" {
address = "${var.consul_address}"
address = var.consul_address
}
# Get Nomad client from Consul
data "consul_service" "read-nomad-cluster" {
name = "nomad-client"
# name = "nomad-clients"
name = "nomad-client"
# name = "nomad-clients"
}
locals {
nomad_node = "${data.consul_service.read-nomad-cluster.service[0]}"
nomad_node = data.consul_service.read-nomad-cluster.service[0]
nomad_node_address = "http://${local.nomad_node.node_address}:${local.nomad_node.port}"
}
# Configure the Nomad provider
provider "nomad" {
# address = "http://services.thefij:4646"
address = "${local.nomad_node_address}"
address = local.nomad_node_address
region = "global"
}
@ -64,3 +64,21 @@ resource "nomad_job" "whoami" {
jobspec = file("${path.module}/whoami.nomad")
}
# Register the NextCloud bootstrap job with Nomad.
# HCL2 must be enabled because the jobspec uses HCL2-only features.
resource "nomad_job" "nextcloud-bootstrap" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/nextcloud-bootstrap.nomad")
}
# Register the NextCloud application job with Nomad.
# HCL2 must be enabled because the jobspec uses HCL2-only features.
resource "nomad_job" "nextcloud" {
hcl2 {
enabled = true
}
jobspec = file("${path.module}/nextcloud.nomad")
}

View File

@ -25,7 +25,7 @@
# value: Hello from Ansible!
# execute_once: true
- name: Build Consul cluster
- name: Build Nomad cluster
hosts: nomad_instances
any_errors_fatal: true
become: true
@ -39,6 +39,21 @@
nomad_cni_enable: true
nomad_docker_enable: true
# nomad_use_consul: true
nomad_bind_address: 0.0.0.0
nomad_host_networks:
# - name: public
# cidr: 192.168.0.0/16
- name: private
cidr: 10.0.0.0/8
reserved_ports: "22"
- name: nomad-bridge
# cidr: 172.26.64.0/20
interface: nomad
reserved_ports: "22"
- name: loopback
interface: lo
reserved_ports: "22"
# TODO: this should probably be based on host
nomad_host_volumes:

View File

@ -63,7 +63,7 @@ job "traefik" {
"--entryPoints.web.address=:80",
"--entryPoints.websecure.address=:443",
# "--entryPoints.websecure.tls=true",
# "--entrypoints.web.http.redirections.entryPoint.to=websecure",
"--entrypoints.web.http.redirections.entryPoint.to=websecure",
# "--entryPoints.admin.address=:8080",
"--accesslog=true",
"--api=true",
@ -91,8 +91,8 @@ job "traefik" {
}
resources {
cpu = 500
memory = 100
cpu = 50
memory = 50
}
}
}

View File

@ -16,16 +16,27 @@ job "whoami" {
network {
mode = "bridge"
port "web" {
# to = 80
host_network = "loopback"
to = 80
}
}
service {
name = "whoami"
port = "web"
connect {
sidecar_service {}
sidecar_service {
proxy {
local_service_port = 80
}
}
sidecar_task {
resources {
cpu = 50
memory = 50
}
}
}
check {