job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
  datacenters = ["dc1"]
  priority = 90
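
  # Rendered as a system job (the recurring scheduler on every backup node)
  # when batch_node is null, or as a dispatchable batch job targeting a
  # single node when batch_node is set.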
  %{ if batch_node == null ~}
  type = "system"
  %{ else ~}
  type = "batch"

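  # In batch mode the job is parameterized so a single backup or restore can
  # be dispatched on demand, e.g. (hypothetical meta values):
  #   nomad job dispatch -meta job_name=nextcloud -meta task=restore backup-oneoff-n2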
  parameterized {
    meta_required = ["job_name"]
    meta_optional = ["task", "snapshot"]
  }

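  # Defaults for the optional dispatch meta keys declared above.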
  meta {
    task = "backup"
    snapshot = "latest"
  }
  %{ endif ~}

  %{ if batch_node == null ~}
  # Only deploy to the nodes that run the tasks we back up
  constraint {
    attribute = "$${node.unique.name}"
    operator = "set_contains_any"
    value = "n1,n2"
  }
  %{ else ~}
  constraint {
    attribute = "$${node.unique.name}"
    value = "${batch_node}"
  }
  %{ endif ~}

  group "backup" {

    network {
      mode = "bridge"

      port "metrics" {
        to = 8080
      }
    }
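
    # Requires a host_volume named "all-volumes" in each client's agent config.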

    volume "all-volumes" {
      type = "host"
      read_only = false
      source = "all-volumes"
    }

    service {
      name = "backups"
      port = "metrics"

      # Connect sidecar providing an upstream to the mysql server
      connect {
        sidecar_service {
          proxy {
            local_service_port = 8080

            # Expose /metrics through the sidecar's "metrics" listener so it
            # can be scraped from outside the service mesh
            expose {
              path {
                path = "/metrics"
                protocol = "http"
                local_path_port = 8080
                listener_port = "metrics"
              }
            }

            upstreams {
              destination_name = "mysql-server"
              local_bind_port = 6060
            }

            config {
              protocol = "tcp"
            }
          }
        }

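        # Shrink the Envoy sidecar from Nomad's default sidecar resources.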
        sidecar_task {
          resources {
            cpu    = 50
            memory = 50
          }
        }
      }

      check {
        port = "metrics"
        type = "http"
        path = "/health"
        interval = "10s"
        timeout = "3s"
      }

      meta {
        metrics_addr = "$${NOMAD_ADDR_metrics}"
      }
    }

    task "backup" {
      driver = "docker"

      volume_mount {
        volume = "all-volumes"
        destination = "/data"
        read_only = false
      }

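      # When dispatched as a one-off, run the requested $${NOMAD_META_task} for
      # $${NOMAD_META_job_name} once and exit; otherwise run the scheduler
      # daemon over every job in node-jobs.hcl.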
      config {
        image = "iamthefij/resticscheduler"
        ports = ["metrics"]
        args = [
          %{ if batch_node != null ~}
          "-once",
          "-$${NOMAD_META_task}",
          "$${NOMAD_META_job_name}",
          %{ endif ~}
          "$${NOMAD_TASK_DIR}/node-jobs.hcl",
        ]
      }

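      # These policies must allow reading the kv paths templated below.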
      vault {
        policies = [
          "access-tables",
          "nomad-task",
        ]
      }

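      # mysql is reached through the Connect upstream bound on localhost;
      # the RCLONE_* settings limit concurrency against the FTP remote.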
      env = {
        "MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
        "MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
        "RCLONE_CHECKERS" = "2"
        "RCLONE_TRANSFERS" = "2"
        "RCLONE_FTP_CONCURRENCY" = "5"
      }

      template {
        data = <<-EOF
{{ with secret "kv/data/mysql" }}
MYSQL_USER=root
MYSQL_PASSWORD={{ .Data.data.root_password }}
{{ end -}}
{{ with secret "kv/data/backups" -}}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
{{ end -}}
        EOF
        destination = "secrets/db.env"
        env = true
      }

      template {
        data = <<-EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
        EOH
        destination = "local/consul.env"
        env = true
      }

      template {
        # Build this node's backup job list from the services running on it
        data = <<-EOF
# Current node is {{ env "node.unique.name" }}
{{ if eq (env "node.unique.name") "n2" -}}
# consul backup
${file("${module_path}/jobs/consul.hcl")}
{{ end -}}

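# Each stanza below pulls in a service's backup jobs only on the node
# currently running that service.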
{{ range service "nextcloud" -}}
# nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nextcloud.hcl")}
{{ end -}}
{{ end -}}

{{ range service "grafana" -}}
# grafana .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/grafana.hcl")}
{{ end -}}
{{ end -}}

{{ range service "photoprism" -}}
# photoprism .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/photoprism.hcl")}
{{ end -}}
{{ end -}}

{{ range service "lldap" -}}
# lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{ end -}}
{{ end -}}

{{ range service "sonarr" -}}
# sonarr .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/sonarr.hcl")}
{{ end -}}
{{ end -}}

{{ range service "nzbget" -}}
# nzbget .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nzbget.hcl")}
{{ end -}}
{{ end -}}
        EOF
        destination = "local/node-jobs.hcl"
      }

      resources {
        cpu = 50
        memory = 256
      }
    }
  }
}
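
# This file is a Terraform template rendered with templatefile(), e.g.
# (hypothetical filename; batch_node and module_path are the template's
# variables):
#   templatefile("backup.nomad.tpl", {
#     batch_node  = null
#     module_path = path.module
#   })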