job "grafana" { datacenters = ["dc1"] group "grafana" { count = 1 network { mode = "bridge" port "web" { host_network = "wesher" to = 3000 } } ephemeral_disk { migrate = true sticky = true } service { name = "grafana" provider = "nomad" port = "web" tags = [ "traefik.enable=true", "traefik.http.routers.grafana.entryPoints=websecure", # "traefik.http.routers.grafana.middlewares=authelia@nomad", ] } task "stunnel" { driver = "docker" lifecycle { hook = "prestart" sidecar = true } config { image = "alpine:3.17" args = ["/bin/sh", "$${NOMAD_TASK_DIR}/start.sh"] } resources { cpu = 100 memory = 100 } template { data = <<EOF set -e apk add stunnel exec stunnel {{ env "NOMAD_TASK_DIR" }}/stunnel.conf EOF destination = "$${NOMAD_TASK_DIR}/start.sh" } template { data = <<EOF syslog = no foreground = yes delay = yes [mysql_client] client = yes accept = 127.0.0.1:3306 {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "mysql-tls" -}} connect = {{ .Address }}:{{ .Port }} {{- end }} PSKsecrets = {{ env "NOMAD_SECRETS_DIR" }}/mysql_stunnel_psk.txt EOF destination = "$${NOMAD_TASK_DIR}/stunnel.conf" } # TODO: Get psk for backup jobs despite multiple job declarations # Probably should use variable ACLs to grant each node job to this path template { data = <<EOF {{- with nomadVar "nomad/jobs/grafana/grafana/stunnel" }}{{ .mysql_stunnel_psk }}{{ end -}} EOF destination = "$${NOMAD_SECRETS_DIR}/mysql_stunnel_psk.txt" } } task "grafana-bootstrap" { driver = "docker" lifecycle { hook = "prestart" sidecar = false } config { image = "mariadb:10" args = [ "/usr/bin/timeout", "2m", "/bin/bash", "-c", "until /usr/bin/mysql --defaults-extra-file=$${NOMAD_SECRETS_DIR}/my.cnf < $${NOMAD_SECRETS_DIR}/bootstrap.sql; do sleep 10; done", ] } template { data = <<EOF [client] host=127.0.0.1 port=3306 user=root {{ with nomadVar "nomad/jobs" -}} password={{ .mysql_root_password }} {{ end -}} EOF destination = "$${NOMAD_SECRETS_DIR}/my.cnf" } template { data = <<EOF {{ with nomadVar "nomad/jobs/grafana" -}} {{ if .db_name -}} CREATE DATABASE IF NOT EXISTS `{{ .db_name }}`; CREATE USER IF NOT EXISTS '{{ .db_user }}'@'%' IDENTIFIED BY '{{ .db_pass }}'; GRANT ALL ON `{{ .db_name }}`.* to '{{ .db_user }}'@'%'; -- Create Read Only user CREATE USER IF NOT EXISTS '{{ .db_user_ro }}'@'%' IDENTIFIED BY '{{ .db_pass_ro }}'; {{ else -}} SELECT 'NOOP'; {{ end -}} {{ end -}} EOF destination = "$${NOMAD_SECRETS_DIR}/bootstrap.sql" } resources { cpu = 50 memory = 50 } } task "grafana" { driver = "docker" config { image = "grafana/grafana:9.4.2" ports = ["web"] } env = { "GF_INSTALL_PLUGINS" = "grafana-clock-panel,grafana-piechart-panel,grafana-polystat-panel,natel-discrete-panel", "GF_PATHS_CONFIG" = "$${NOMAD_ALLOC_DIR}/config/grafana.ini" "GF_PATHS_PROVISIONING" = "$${NOMAD_ALLOC_DIR}/config/provisioning" } template { data = <<EOF {{ with nomadVar "nomad/jobs/grafana" -}} GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }} GF_SMTP_USER={{ .smtp_user }} GF_SMTP_PASSWORD={{ .smtp_password }} GF_EXTERNAL_IMAGE_STORAGE_S3_ACCESS_KEY={{ .minio_access_key }} GF_EXTERNAL_IMAGE_STORAGE_S3_SECRET_KEY={{ .minio_secret_key }} GRAFANA_ALERT_EMAIL_ADDRESSES={{ .alert_email_addresses }} GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET={{ .oidc_secret }} {{ if .db_name -}} # Database storage GF_DATABASE_TYPE=mysql GF_DATABASE_HOST=127.0.0.1:3306 GF_DATABASE_NAME={{ .db_name }} GF_DATABASE_USER={{ .db_user }} GF_DATABASE_PASSWORD={{ .db_pass }} {{- end }} SLACK_BOT_URL={{ .slack_bot_url }} SLACK_BOT_TOKEN={{ .slack_bot_token }} SLACK_HOOK_URL={{ .slack_hook_url }} {{ end -}} EOF 
env = true destination = "secrets/conf.env" } resources { cpu = 100 memory = 200 } } task "grafana-reprovisioner" { driver = "docker" lifecycle { hook = "prestart" sidecar = true } config { image = "alpine" args = ["$${NOMAD_TASK_DIR}/startup.sh"] } resources { cpu = 100 memory = 100 } env = { LOG_FILE = "/var/log/grafana_reloader.log" } template { data = <<EOF #! /bin/sh apk add curl touch "$LOG_FILE" exec tail -f "$LOG_FILE" EOF perms = "777" destination = "$${NOMAD_TASK_DIR}/startup.sh" } template { data = <<EOF {{ with nomadVar "nomad/jobs/grafana" -}} GF_SECURITY_ADMIN_PASSWORD={{ .admin_pw }} {{ end -}} EOF env = true destination = "$${NOMAD_SECRETS_DIR}/conf.env" } template { data = <<EOF #! /bin/sh exec > "$LOG_FILE" exec 2>&1 GRAFANA_URL=http://127.0.0.1:3000 echo "Reload dashboards" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/dashboards/reload echo "Reload datasources" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/datasources/reload echo "Reload plugins" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/plugins/reload echo "Reload notifications" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/notifications/reload echo "Reload access-control" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/access-control/reload echo "Reload alerting" curl -s -S --user admin:$GF_SECURITY_ADMIN_PASSWORD --request POST $GRAFANA_URL/api/admin/provisioning/alerting/reload EOF change_mode = "noop" perms = "777" destination = "$${NOMAD_TASK_DIR}/reload_config.sh" } %{ for config_file in fileset(join("/", [module_path, "grafana"]), "**") ~} template { data = <<EOF ${file(join("/", [module_path, "grafana", config_file]))} EOF destination = "$${NOMAD_ALLOC_DIR}/config/${config_file}" perms = 777 # Set owner to grafana uid # uid = 472 # Change template delimeter for dashboard files that use json and have double curly braces and square braces %{ if length(regexall("dashboard", config_file)) > 0 ~} left_delimiter = "<<<<" right_delimiter = ">>>>" %{ endif } change_mode = "script" change_script { command = "/local/reload_config.sh" } } %{ endfor } } task "grafana-image-renderer" { driver = "docker" config { image = "grafana/grafana-image-renderer:3.6.1" ports = ["renderer"] } env = { "RENDERING_MODE" = "clustered" "RENDERING_CLUSTERING_MODE" = "browser" "RENDERING_CLUSTERING_MAX_CONCURRENCY" = 5 "RENDERING_CLUSTERING_TIMEOUT" = 30 } } } }
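
# A minimal sketch of seeding the Nomad variables read by the templates above.
# The paths and keys come from this job; the values are placeholders to fill in
# for your environment:
#
#   nomad var put nomad/jobs/grafana \
#     admin_pw=... smtp_user=... smtp_password=... \
#     minio_access_key=... minio_secret_key=... \
#     alert_email_addresses=... oidc_secret=... \
#     db_name=... db_user=... db_pass=... \
#     db_user_ro=... db_pass_ro=... \
#     slack_bot_url=... slack_bot_token=... slack_hook_url=...
#   nomad var put nomad/jobs/grafana/grafana/stunnel mysql_stunnel_psk=...
#
# The bootstrap task additionally expects mysql_root_password on the shared
# "nomad/jobs" variable.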