# Terraform template (rendered with templatefile) that produces a Nomad jobspec.
#
# Template variables:
#   batch_node  - null: render a "system" job named "backup" that runs a
#                 resticscheduler agent on the nodes listed in the constraint
#                 below. non-null: render a parameterized "batch" job named
#                 "backup-oneoff-<batch_node>" pinned to that single node.
#   module_path - directory containing the per-service backup job definitions
#                 inlined into the node-jobs.hcl template at the bottom.
#
# NOTE: "$$" is the Terraform escape for a literal "$", so the rendered
# jobspec keeps the Nomad/consul-template interpolations intact.
job "backup%{ if batch_node != null }-oneoff-${batch_node}%{ endif }" {
  datacenters = ["dc1"]
  %{ if batch_node == null ~}
  # System job: Nomad runs one instance on every node matching the constraint.
  type = "system"
  %{ else ~}
  # One-off mode: a parameterized batch job dispatched manually with metadata.
  type = "batch"

  parameterized {
    # Name of the backup job definition (from node-jobs.hcl) to operate on.
    meta_required = ["job_name"]
    # task is passed to resticscheduler as a flag (see args below).
    # snapshot presumably selects which snapshot to act on -- TODO confirm:
    # it is consumed by the container image, it is not referenced in this file.
    meta_optional = ["task", "snapshot"]

  }

  # Defaults for the optional dispatch metadata declared above.
  meta {
    task = "backup"
    snapshot = "latest"
  }
  %{ endif ~}

  %{ if batch_node == null ~}
  # System mode: restrict to the nodes whose workloads need backing up.
  constraint {
    attribute = "$${node.unique.name}"
    operator = "set_contains_any"
    # Only deploy to nodes running tasks to backup
    value = "n1,n2"
  }
  %{ else ~}
  # One-off mode: pin the job to the node requested at render time.
  constraint {
    attribute = "$${node.unique.name}"
    value = "${batch_node}"
  }
  %{ endif ~}

  group "backup" {

    network {
      mode = "bridge"

      # Metrics endpoint; the container listens on 8080 (see
      # local_service_port below) and its address is advertised via the
      # service meta block.
      port "metrics" {
        to = 8080
      }
    }

    # Host volume giving the backup task access to all application data.
    volume "all-volumes" {
      type = "host"
      read_only = false
      source = "all-volumes"
    }

    service {
      port = "metrics"

      # Add connect to mysql
      connect {
        sidecar_service {
          proxy {
            local_service_port = 8080

            # Expose the mysql-server Connect service on localhost:6060
            # inside this task group's network namespace.
            upstreams {
              destination_name = "mysql-server"
              local_bind_port = 6060
            }

            config {
              protocol = "tcp"
            }
          }
        }

        # Keep the Envoy sidecar small.
        sidecar_task {
          resources {
            cpu    = 50
            memory = 50
          }
        }
      }

      # Advertise the metrics address for scrapers that read service meta.
      meta {
        metrics_addr = "$${NOMAD_ADDR_metrics}"
      }
    }

    task "backup" {
      driver = "docker"

      # Mount all application data at /data, writable so restores can work.
      volume_mount {
        volume = "all-volumes"
        destination = "/data"
        read_only = false
      }

      config {
        image = "iamthefij/resticscheduler"
        ports = ["metrics"]
        args = [
          %{ if batch_node != null ~}
          # One-off mode: run a single action (flag taken from
          # NOMAD_META_task, e.g. "-backup") for the dispatched job name,
          # then exit.
          "-once",
          "-$${NOMAD_META_task}",
          "$${NOMAD_META_job_name}",
          %{ endif ~}
          # Jobs file rendered by the node-jobs.hcl template below.
          "/jobs/node-jobs.hcl",
        ]

        # Bind-mount the task's jobs/ directory (the template destination)
        # into the container at /jobs.
        mount {
          type = "bind"
          target = "/jobs"
          source = "jobs"
        }
      }

      # Vault policies needed to read the kv secrets used by the templates.
      vault {
        policies = [
          "access-tables",
          "nomad-task",
        ]
      }

      # Reach MySQL through the Connect sidecar upstream bound above
      # (Nomad exposes upstream vars with dashes mapped to underscores).
      env = {
        "MYSQL_HOST" = "$${NOMAD_UPSTREAM_IP_mysql_server}"
        "MYSQL_PORT" = "$${NOMAD_UPSTREAM_PORT_mysql_server}"
      }

      # Database and restic/rclone credentials from Vault, loaded as env vars.
      template {
        # Probably want to use database credentials that have access to dump all tables
        data = <<EOF
{{ with secret "kv/data/nextcloud" -}}
MYSQL_DATABASE={{ .Data.data.db_name }}
MYSQL_USER={{ .Data.data.db_user }}
MYSQL_PASSWORD={{ .Data.data.db_pass }}
{{ end -}}
{{ with secret "kv/data/backups" -}}
BACKUP_PASSPHRASE={{ .Data.data.backup_passphrase }}
RCLONE_FTP_HOST={{ .Data.data.nas_ftp_host }}
RCLONE_FTP_USER={{ .Data.data.nas_ftp_user }}
RCLONE_FTP_PASS={{ .Data.data.nas_ftp_pass | toJSON }}
RCLONE_FTP_EXPLICIT_TLS=true
RCLONE_FTP_NO_CHECK_CERTIFICATE=true
{{ end -}}
        EOF
        destination = "secrets/db.env"
        env = true
      }

      # Point the task at the Consul agent on this node's host IP.
      template {
        data = <<EOH
CONSUL_HTTP_ADDR={{ env "attr.unique.network.ip-address" }}:8500
        EOH
        destination = "local/consul.env"
        env = true
      }


      # Render the per-node jobs file: the Terraform-time file() calls inline
      # each service's backup job definition, and the consul-template
      # conditionals include a section only when that service is registered
      # on the current node (Consul backup is hard-coded to node n2).
      template {
        # Build jobs based on node
        data = <<EOF
# Current node is {{ env "node.unique.name" }}
{{ if eq (env "node.unique.name") "n2" -}}
# Consul backup
${file("${module_path}/jobs/consul.hcl")}
{{ end -}}
{{ range service "nextcloud" -}}
# Nextcloud .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/nextcloud.hcl")}
{{ end -}}
{{ end -}}
{{ range service "lldap" -}}
# Lldap .Node {{ .Node }}
{{ if eq .Node (env "node.unique.name") -}}
${file("${module_path}/jobs/lldap.hcl")}
{{ end -}}
{{ end -}}
        EOF
        destination = "jobs/node-jobs.hcl"
      }

      resources {
        cpu = 50
        memory = 256
      }
    }
  }
}