# Nomad jobspec (Terraform templatefile input) running a single-node Loki
# log store plus Promtail shippers that forward journald logs to it.
#
# Escaping notes:
#   - "$${...}" renders to a literal "${...}" after templatefile(); Nomad
#     expands it at runtime (task env vars / node interpolation).
#   - %{~ if use_wesher ~} is a templatefile() directive: the wesher
#     host_network lines are emitted only when the overlay net is in use.
#   - {{ ... }} inside the template heredocs is consul-template syntax,
#     evaluated by the Nomad client when rendering the file.
job "loki" {
  datacenters = ["dc1"]
  type        = "service"
  priority    = 55

  group "loki" {
    count = 1

    network {
      mode = "bridge"

      port "main" {
        %{~ if use_wesher ~}
        host_network = "wesher"
        %{~ endif ~}
        to = 3100
      }
    }

    # Loki uses disk storage. We want to preserve, if possible
    # but some loss is ok.
    ephemeral_disk {
      migrate = true
      sticky  = true
    }

    task "loki" {
      driver = "docker"

      service {
        name     = "loki"
        provider = "nomad"
        port     = "main"

        tags = [
          "traefik.enable=true",
          "traefik.http.routers.loki.entryPoints=websecure",
        ]

        # Gate health on Loki's own /ready endpoint; restart the task if it
        # stays unready beyond the 90s startup grace period.
        check {
          type     = "http"
          path     = "/ready"
          interval = "30s"
          timeout  = "2s"

          check_restart {
            limit = 5
            grace = "90s"
          }
        }
      }

      config {
        image = "grafana/loki:2.8.7"
        ports = ["main"]
        args  = ["--config.file=$${NOMAD_TASK_DIR}/loki-config.yml"]
      }

      # Loki configuration, rendered into the task dir at placement time.
      template {
        data = <<EOF
auth_enabled: false

server:
  http_listen_port: 3100

common:
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory
  replication_factor: 1
  # NOTE(review): /tmp/loki (WAL / compactor working dir) lives outside the
  # allocation dir, so the ephemeral_disk migrate/sticky settings above do
  # not cover it; a path under NOMAD_TASK_DIR would — confirm before changing.
  path_prefix: /tmp/loki

schema_config:
  configs:
    - from: 2020-05-15
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

storage_config:
  boltdb_shipper:
    active_index_directory: {{ env "NOMAD_TASK_DIR" }}/index

  filesystem:
    directory: {{ env "NOMAD_TASK_DIR" }}/chunks

limits_config:
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 168h

chunk_store_config:
  max_look_back_period: 168h

table_manager:
  retention_deletes_enabled: true
  retention_period: 168h
EOF

        destination = "$${NOMAD_TASK_DIR}/loki-config.yml"

        # Render as a file only; do not export the contents as env vars.
        env = false
      }

      resources {
        cpu    = 50
        memory = 100
      }
    }
  }

  group "promtail" {
    # TODO: This must be updated to match the number of servers (possibly grabbed from TF)
    # I am moving away from `system` jobs because of https://github.com/hashicorp/nomad/issues/1202
    count = 2

    # Spread the shippers: never place two promtail allocs on the same host.
    constraint {
      distinct_hosts = true
    }

    network {
      mode = "bridge"

      port "promtail" {
        %{~ if use_wesher ~}
        host_network = "wesher"
        %{~ endif ~}
        to = 9080
      }
    }

    service {
      name     = "promtail"
      provider = "nomad"
      port     = "promtail"

      # NOTE(review): after templatefile() these become Nomad interpolation
      # (${NOMAD_DC} / ${node.unique.name}); node.unique.name is standard, but
      # confirm the env-style ${NOMAD_DC} actually renders in service meta.
      meta {
        nomad_dc        = "$${NOMAD_DC}"
        nomad_node_name = "$${node.unique.name}"
      }

      tags = [
        "prometheus.scrape",
      ]
    }

    task "promtail" {
      driver = "docker"

      config {
        image = "grafana/promtail:3.3.0"
        args  = ["-config.file=$${NOMAD_TASK_DIR}/promtail.yml"]
        ports = ["promtail"]

        # Bind mount host machine-id and log directories

        mount {
          type     = "bind"
          source   = "/etc/machine-id"
          target   = "/etc/machine-id"
          readonly = true
        }

        mount {
          type     = "bind"
          source   = "/var/log/journal/"
          target   = "/var/log/journal/"
          readonly = true
        }

        mount {
          type     = "bind"
          source   = "/run/log/journal/"
          target   = "/run/log/journal/"
          readonly = true
        }

        # mount {
        #   type     = "bind"
        #   source   = "/var/log/audit"
        #   target   = "/var/log/audit"
        #   readonly = true
        # }
      }

      template {
        data = <<EOF
---
server:
  http_listen_address: 0.0.0.0
  http_listen_port: 9080

clients:
  # Discover the Loki push endpoint via Nomad native service discovery.
  {{ range nomadService 1 (env "NOMAD_ALLOC_ID") "loki" -}}
  - url: http://{{ .Address }}:{{ .Port }}/loki/api/v1/push
  {{- end }}

scrape_configs:
  - job_name: journal
    journal:
      json: false
      max_age: 12h
      path: /var/log/journal
      labels:
        job: systemd-journal
    relabel_configs:
      - source_labels: ['__journal__systemd_unit']
        target_label: unit
      - source_labels: ['__journal__hostname']
        target_label: hostname
      - source_labels: ['__journal__transport']
        target_label: journal_transport
      # Docker log labels
      - source_labels: ['__journal_syslog_identifier']
        target_label: syslog_identifier
      - source_labels: ['__journal_image_name']
        target_label: docker_image_name
      - source_labels: ['__journal_container_name']
        target_label: docker_container_name
      - source_labels: ['__journal_container_id']
        target_label: docker_container_id
      - source_labels: ['__journal_com_docker_compose_project']
        target_label: docker_compose_project
      - source_labels: ['__journal_com_docker_compose_service']
        target_label: docker_compose_service
      - source_labels: ['__journal_com_hashicorp_nomad_alloc_id']
        target_label: nomad_alloc_id
      - source_labels: ['__journal_com_hashicorp_nomad_job_id']
        target_label: nomad_job_id
      - source_labels: ['__journal_com_hashicorp_nomad_job_name']
        target_label: nomad_job_name
      - source_labels: ['__journal_com_hashicorp_nomad_node_name']
        target_label: nomad_node_name
      - source_labels: ['__journal_com_hashicorp_nomad_group_name']
        target_label: nomad_group_name
      - source_labels: ['__journal_com_hashicorp_nomad_task_name']
        target_label: nomad_task_name
EOF

        destination = "$${NOMAD_TASK_DIR}/promtail.yml"
      }

      resources {
        cpu    = 50
        memory = 100
      }
    }
  }
}