homelab-nomad/services/backups/backup.nomad

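# Backup job rendered from a Terraform template. When batch_node is unset it
# becomes a "system" job that runs a scheduled resticscheduler instance on
# every node. When batch_node is set it becomes a parameterized "batch" job
# for one-off runs on that node, dispatched with something like (job name and
# node are hypothetical):
#
#   nomad job dispatch -meta job_name=grafana backup-oneoff-node1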
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
datacenters = ["dc1"]
priority = 90
%{ if batch_node == null ~}
type = "system"
%{ else ~}
type = "batch"
parameterized {
meta_required = ["job_name"]
meta_optional = ["task", "snapshot"]
}
meta {
task = "backup"
snapshot = "latest"
}
%{ endif ~}
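
  # When dispatched as a one-off batch job, constrain the run to the node the
  # job was rendered for.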
  %{ if batch_node != null ~}
  constraint {
    attribute = "$${node.unique.name}"
    value     = "${batch_node}"
  }
  %{ endif ~}
group "backup" {
network {
mode = "bridge"
port "metrics" {
host_network = "wesher"
to = 8080
}
}
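
    # Host volume exposing the node's service data directories so a single
    # backup task can reach all of them.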
volume "all-volumes" {
type = "host"
read_only = false
source = "all-volumes"
}
service {
name = "backup"
provider = "nomad"
port = "metrics"
tags = [
"prometheus.scrape"
]
}
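
    # resticscheduler executes the backup jobs rendered into node-jobs.hcl
    # below. As a system job it stays resident and runs them on schedule; as a
    # dispatched batch job, "-once" runs a single named job and exits.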
task "backup" {
driver = "docker"
volume_mount {
volume = "all-volumes"
destination = "/data"
read_only = false
}
config {
image = "iamthefij/resticscheduler:0.1.1"
ports = ["metrics"]
args = [
%{ if batch_node != null ~}
"-once",
"-$${NOMAD_META_task}",
"$${NOMAD_META_job_name}",
"--snapshot",
"$${NOMAD_META_snapshot}",
"--push-gateway",
"http://pushgateway.nomad:9091",
%{ endif ~}
"$${NOMAD_TASK_DIR}/node-jobs.hcl",
]
}
env = {
"RCLONE_CHECKERS" = "2"
"RCLONE_TRANSFERS" = "2"
"RCLONE_FTP_CONCURRENCY" = "5"
}
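
      # Render MySQL credentials plus restic/rclone secrets from Nomad
      # Variables into the task environment.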
      template {
        data        = <<EOF
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
# TODO: Move this to new mysql root pass path
{{ with nomadVar "nomad/jobs" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .mysql_root_password }}
{{ end -}}
{{ with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) -}}
BACKUP_PASSPHRASE={{ .backup_passphrase }}
RCLONE_FTP_HOST={{ .nas_ftp_host }}
RCLONE_FTP_USER={{ .nas_ftp_user }}
RCLONE_FTP_PASS={{ .nas_ftp_pass.Value | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
{{ end -}}
EOF
        destination = "secrets/db.env"
        env         = true
      }

      template {
        # Build the set of backup jobs for services currently running on this node
        data        = <<EOF
# Current node is {{ env "node.unique.name" }} {{ env "node.unique.id" }}
{{ range nomadService "grafana" -}}
# grafana .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/grafana.hcl")}
{{- end }}
{{- end }}
{{ range nomadService "photoprism" -}}
# photoprism .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/photoprism.hcl")}
{{- end }}
{{- end }}
{{ range nomadService "lldap" -}}
# lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{- end }}
{{- end }}
{{ range nomadService "sonarr" -}}
# sonarr .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/sonarr.hcl")}
{{- end }}
{{- end }}
{{ range nomadService "nzbget" -}}
# nzbget .Node {{ .Node }}
{{ if eq .Node (env "node.unique.id") -}}
${file("${module_path}/jobs/nzbget.hcl")}
{{- end }}
{{- end }}
EOF
        destination = "local/node-jobs.hcl"
      }

      resources {
        cpu    = 50
        memory = 256
      }
    }
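
    # Sidecar providing a PSK-authenticated TLS tunnel to MySQL, so database
    # dumps travel encrypted while the backup task talks plain MySQL on
    # 127.0.0.1:3306.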
task "stunnel" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "alpine:3.17"
args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"]
}
resources {
cpu = 100
memory = 100
}
template {
data = <<EOF
set -e
apk add stunnel
exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf
EOF
destination = "$${NOMAD_TASK_DIR}/start.sh"
}
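
      # stunnel client config: accept plaintext MySQL on localhost and forward
      # to a "mysql-tls" instance selected deterministically by alloc ID.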
      template {
        data        = <<EOF
syslog = no
foreground = yes
delay = yes
[mysql_client]
client = yes
accept = 127.0.0.1:3306
{{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}}
connect = {{ .Address }}:{{ .Port }}
{{- end }}
PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt
EOF
        destination = "$${NOMAD_TASK_DIR}/stunnel.conf"
      }

      # TODO: Get psk for backup jobs despite multiple job declarations
      # Probably should use variable ACLs to grant each node job to this path
      template {
        data        = <<EOF
{{- with nomadVar (print "nomad/jobs/" (env "NOMAD_JOB_ID")) }}{{ .mysql_stunnel_psk }}{{ end -}}
EOF
        destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt"
      }
    }
  }
}