Compare commits


11 Commits

SHA1        Message                                 CI (drone/push)   Date
4597842ccf  Clean up slimmed build                  passing           2019-06-07 16:29:53 -07:00
64b28038e9  Rename binary within Docker image       passing           2019-06-07 16:21:02 -07:00
726748852e  Move src to gopath subdir               passing           2019-06-07 16:19:56 -07:00
758180407f  Move src to gopath                      failing           2019-06-07 16:19:03 -07:00
b1be36afdf  Try to change workspace                 failing           2019-06-07 16:17:12 -07:00
02899944a9  Try to speed up build                   failing           2019-06-07 16:15:50 -07:00
d2807c74b1  Add back manifest and correct tagging   failing           2019-06-07 15:55:21 -07:00
12f48e2c6d  Try to get it to build again            passing           2019-06-07 15:46:12 -07:00
7b94fb20f5  Try to get it to build again            passing           2019-06-07 15:43:24 -07:00
6d8a666213  Try to get it to build                  passing           2019-06-07 15:41:16 -07:00
7c6e536ec3  Try out multi arch builds               passing           2019-06-07 15:40:04 -07:00
18 changed files with 235 additions and 1912 deletions

.drone.yml

@@ -1,25 +1,15 @@
---
kind: pipeline
name: test
workspace:
base: /go/src/dockron
steps:
- name: test
image: golang:1.22
- name: build
image: golang:1.11
commands:
- make test
- name: check
image: iamthefij/drone-pre-commit:personal
environment:
SKIP: docker-compose-check
# - name: itest
# image: docker/compose:alpine-1.26.2
# environment:
# VERSION: ${DRONE_TAG:-${DRONE_COMMIT}}
# commands:
# - apk add make bash
# - make itest
- go get -u github.com/golang/dep/cmd/dep
- make build
---
kind: pipeline
@@ -36,13 +26,26 @@ trigger:
- refs/heads/master
- refs/tags/v*
workspace:
base: /go/src/dockron
steps:
- name: build
image: golang:1.22
environment:
VERSION: ${DRONE_TAG:-${DRONE_COMMIT}}
image: golang:1.11
commands:
- make build-linux-static
- go get -u github.com/golang/dep/cmd/dep
- make build-all-static
- name: push image - amd64
image: plugins/docker
settings:
repo: iamthefij/dockron
auto_tag: true
auto_tag_suffix: linux-amd64
username:
from_secret: docker_username
password:
from_secret: docker_password
- name: push image - arm
image: plugins/docker
@@ -56,7 +59,7 @@ steps:
from_secret: docker_password
build_args:
- ARCH=arm
- REPO=arm32v7
- REPO=arm32v6
- name: push image - arm64
image: plugins/docker
@@ -69,19 +72,8 @@ steps:
password:
from_secret: docker_password
build_args:
- ARCH=arm64
- REPO=arm64v8
- name: push image - amd64
image: plugins/docker
settings:
repo: iamthefij/dockron
auto_tag: true
auto_tag_suffix: linux-amd64
username:
from_secret: docker_username
password:
from_secret: docker_password
- ARCH=arm
- REPO=arm32v6
- name: publish manifest
image: plugins/manifest
@@ -93,28 +85,3 @@ steps:
from_secret: docker_username
password:
from_secret: docker_password
---
kind: pipeline
name: notify
depends_on:
- test
- publish
trigger:
status:
- failure
steps:
- name: notify
image: drillster/drone-email
settings:
host:
from_secret: SMTP_HOST # pragma: whitelist secret
username:
from_secret: SMTP_USER # pragma: whitelist secret
password:
from_secret: SMTP_PASS # pragma: whitelist secret
from: drone@iamthefij.com

.gitignore (vendored, 6 lines changed)

@@ -25,11 +25,7 @@ _testmain.go
*.prof
# Output
coverage.out
dockron
dockron-*
dockron-linux-*
# deps
vendor/
# Test output
itest/*_result.txt

.golangci.yml (deleted)

@@ -1,36 +0,0 @@
---
linters:
enable:
- errname
- errorlint
- exhaustive
- gofumpt
- goimports
- goprintffuncname
- misspell
- gomnd
- tagliatelle
- tenv
- testpackage
- thelper
- tparallel
- unconvert
- wrapcheck
- wsl
disable:
- gochecknoglobals
linters-settings:
gosec:
excludes:
- G204
tagliatelle:
case:
rules:
yaml: snake
issues:
exclude-rules:
- path: _test\.go
linters:
- gosec

.pre-commit-config.yaml (deleted)

@@ -1,22 +0,0 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: check-added-large-files
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-merge-conflict
- repo: https://github.com/golangci/golangci-lint
rev: v1.52.2
hooks:
- id: golangci-lint
args: ["--timeout=5m"]
- repo: https://github.com/IamTheFij/docker-pre-commit
rev: v3.0.1
hooks:
- id: docker-compose-check
- repo: https://github.com/hadolint/hadolint
rev: v2.12.0
hooks:
- id: hadolint

Dockerfile

@@ -1,6 +1,21 @@
FROM scratch
ARG REPO=library
# FROM golang:1.11-alpine AS builder
#
# RUN apk add --no-cache git
# RUN go get -u github.com/golang/dep/cmd/dep
#
# WORKDIR /go/src/app/
# COPY ./Gopkg.* /go/src/app/
# RUN dep ensure --vendor-only
#
# COPY ./main.go /go/src/app/
#
# RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} go build -a -installsuffix nocgo -o dockron .
FROM ${REPO}/busybox:latest
WORKDIR /root/
# COPY --from=builder /go/src/app/dockron .
ARG ARCH=amd64
COPY ./dockron-linux-${ARCH} /dockron
COPY ./dockron-linux-${ARCH} ./dockron
ENTRYPOINT [ "/dockron" ]
CMD [ "./dockron" ]

Dockerfile.multi-stage (deleted)

@@ -1,22 +0,0 @@
ARG REPO=library
FROM golang:1.22-alpine AS builder
# hadolint ignore=DL3018
RUN apk add --no-cache git && \
mkdir /app
WORKDIR /app
COPY ./go.mod ./go.sum /app/
RUN go mod download
COPY ./main.go /app/
ARG ARCH=amd64
ARG VERSION=dev
ENV CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH}
RUN go build -ldflags "-X main.version=${VERSION}" -a -installsuffix nocgo -o dockron .
FROM scratch
COPY --from=builder /app/dockron /
ENTRYPOINT [ "/dockron" ]

Gopkg.lock (generated, new file, 63 lines)

@@ -0,0 +1,63 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/Microsoft/go-winio"
packages = ["."]
revision = "a6d595ae73cf27a1b8fc32930668708f45ce1c85"
version = "v0.4.9"
[[projects]]
name = "github.com/docker/distribution"
packages = ["digest","reference"]
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/docker/docker"
packages = ["api/types","api/types/blkiodev","api/types/container","api/types/events","api/types/filters","api/types/mount","api/types/network","api/types/reference","api/types/registry","api/types/strslice","api/types/swarm","api/types/time","api/types/versions","api/types/volume","client","pkg/tlsconfig"]
revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
version = "v1.13.1"
[[projects]]
name = "github.com/docker/go-connections"
packages = ["nat","sockets","tlsconfig"]
revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55"
version = "v0.4.0"
[[projects]]
name = "github.com/docker/go-units"
packages = ["."]
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
version = "v0.3.3"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/robfig/cron"
packages = ["."]
revision = "b41be1df696709bb6395fe435af20370037c0b4c"
version = "v1.1"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","internal/socks","proxy"]
revision = "f4c29de78a2a91c00474a2e689954305c350adf9"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["windows"]
revision = "0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "dd521cf26a7594f53c78967b3d38f8e0de25745c662c03f6e73effaf2b59347d"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file, 33 lines)

@@ -0,0 +1,33 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/docker/docker"
version = "1.13.1"
[[constraint]]
name = "github.com/robfig/cron"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "golang.org/x/net"

Makefile (137 lines changed)

@@ -1,141 +1,74 @@
OUTPUT ?= dockron
DOCKER_TAG ?= $(OUTPUT)-dev-$(USER)
GIT_TAG_NAME := $(shell git tag -l --contains HEAD)
GIT_SHA := $(shell git rev-parse HEAD)
VERSION ?= $(if $(GIT_TAG_NAME),$(GIT_TAG_NAME),$(GIT_SHA))
GOFILES = *.go go.mod go.sum
DOCKER_TAG ?= dockron-dev-${USER}
.PHONY: default
default: build
.PHONY: all
all: check test itest
# Downloads dependencies into vendor directory
vendor: $(GOFILES)
go mod vendor
vendor:
dep ensure
# Runs the application, useful while developing
.PHONY: run
run:
go run . -watch 10s -debug
.PHONY: test
test:
go test -coverprofile=coverage.out
go tool cover -func=coverage.out
@go tool cover -func=coverage.out | awk -v target=75.0% \
'/^total:/ { print "Total coverage: " $$3 " Minimum coverage: " target; if ($$3+0.0 >= target+0.0) print "ok"; else { print "fail"; exit 1; } }'
.PHONY: itest
itest:
./itest/itest.sh
# Installs pre-commit hooks
.PHONY: install-hooks
install-hooks:
pre-commit install --install-hooks
# Runs pre-commit checks on files
.PHONY: check
check:
pre-commit run --all-files
run: vendor
go run *.go
# Output target
$(OUTPUT): $(GOFILES)
@echo Version: $(VERSION)
go build -ldflags '-X "main.version=$(VERSION)"' -o $(OUTPUT)
dockron: vendor
go build -o dockron
# Alias for building
.PHONY: build
build: $(OUTPUT)
build: dockron
$(OUTPUT)-darwin-amd64: $(GOFILES)
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 \
go build -ldflags '-X "main.version=$(VERSION)"' -a -installsuffix nocgo \
-o $(OUTPUT)-darwin-amd64
dockron-linux-amd64: vendor
GOARCH=amd64 CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o dockron-linux-amd64
$(OUTPUT)-linux-amd64: $(GOFILES)
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 \
go build -ldflags '-X "main.version=$(VERSION)"' -a -installsuffix nocgo \
-o $(OUTPUT)-linux-amd64
dockron-linux-arm: vendor
GOARCH=arm CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o dockron-linux-arm
$(OUTPUT)-linux-arm: $(GOFILES)
GOOS=linux GOARCH=arm CGO_ENABLED=0 \
go build -ldflags '-X "main.version=$(VERSION)"' -a -installsuffix nocgo \
-o $(OUTPUT)-linux-arm
$(OUTPUT)-linux-arm64: $(GOFILES)
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 \
go build -ldflags '-X "main.version=$(VERSION)"' -a -installsuffix nocgo \
-o $(OUTPUT)-linux-arm64
.PHONY: build-linux-static
build-linux-static: $(OUTPUT)-linux-amd64 $(OUTPUT)-linux-arm $(OUTPUT)-linux-arm64
dockron-linux-arm64: vendor
GOARCH=arm64 CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o dockron-linux-arm64
.PHONY: build-all-static
build-all-static: $(OUTPUT)-darwin-amd64 build-linux-static
build-all-static: dockron-linux-amd64 dockron-linux-arm dockron-linux-arm64
# Cleans all build artifacts
.PHONY: clean
clean:
rm -f $(OUTPUT)
rm -f $(OUTPUT)-linux-*
rm dockron
# Cleans vendor directory
.PHONY: clean-vendor
clean-vendor:
rm -fr ./vendor
# Attempts to update dependencies
.PHONY: dep-update
dep-update:
dep ensure -update
.PHONY: docker-build
docker-build: $(OUTPUT)-linux-amd64
docker build . -t $(DOCKER_TAG)-linux-amd64
docker-build:
docker build . -t ${DOCKER_TAG}-linux-amd64
# Cross build for arm architectures
.PHONY: docker-build-arm
docker-build-arm: $(OUTPUT)-linux-arm
docker build --build-arg REPO=arm32v7 --build-arg ARCH=arm . -t $(DOCKER_TAG)-linux-arm
.PHONY: docker-cross-build-arm
docker-cross-build-arm:
docker build --build-arg REPO=arm32v6 --build-arg ARCH=arm . -t ${DOCKER_TAG}-linux-arm
.PHONY: docker-build-arm64
docker-build-arm64: $(OUTPUT)-linux-arm64
docker build --build-arg REPO=arm64v8 --build-arg ARCH=arm64 . -t $(DOCKER_TAG)-linux-arm64
.PHONY: docker-cross-build-arm64
docker-cross-build-arm64:
docker build --build-arg REPO=arm64v8 --build-arg ARCH=arm64 . -t ${DOCKER_TAG}-linux-arm64
.PHONY: docker-run
docker-run: docker-build
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock --name $(DOCKER_TAG)-run $(DOCKER_TAG)-linux-amd64
# Cross run on host architecture
.PHONY: docker-run-arm
docker-run-arm: docker-build-arm
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock --name $(DOCKER_TAG)-run $(DOCKER_TAG)-linux-arm
.PHONY: docker-cross-run-arm
docker-cross-run-arm: docker-cross-build-arm
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock --name $(DOCKER_TAG)-run ${DOCKER_TAG}-linux-arm
.PHONY: docker-run-arm64
docker-run-arm64: docker-build-arm64
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock --name $(DOCKER_TAG)-run $(DOCKER_TAG)-linux-arm64
# Multi stage builds
.PHONY: docker-staged-build
docker-staged-build:
docker build --build-arg VERSION=$(VERSION) \
-t $(DOCKER_TAG)-linux-amd64 \
-f Dockerfile.multi-stage .
# Cross build for arm architectures
.PHONY: docker-staged-build-arm
docker-staged-build-arm:
docker build --build-arg VERSION=$(VERSION) \
--build-arg REPO=arm32v7 --build-arg ARCH=arm -t $(DOCKER_TAG)-linux-arm \
-f Dockerfile.multi-stage .
.PHONY: docker-staged-build-arm64
docker-staged-build-arm64:
docker build --build-arg VERSION=$(VERSION) \
--build-arg REPO=arm64v8 --build-arg ARCH=arm64 -t $(DOCKER_TAG)-linux-arm64 \
-f Dockerfile.multi-stage .
.PHONY: docker-example
docker-example:
# Uses multistage
docker-compose build
docker-compose up
.PHONY: docker-cross-run-arm64
docker-cross-run-arm64: docker-cross-build-arm64
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock --name $(DOCKER_TAG)-run ${DOCKER_TAG}-linux-arm64

README.md

@@ -16,42 +16,14 @@ It will then run in the foreground, periodically checking Docker for containers
By default, Dockron will periodically poll Docker for new containers or schedule changes every minute. You can specify an interval by using the `-watch` flag.
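For example, a minimal usage sketch of the `-watch` flag described above (the 30-second value is illustrative; the flag takes a Go duration string):
```
# Poll Docker for new containers or schedule changes every 30 seconds
dockron -watch 30s
```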
### Running with Docker
Dockron is also available as a Docker image. The multi-arch repo can be found at [IamTheFij/dockron](https://hub.docker.com/r/iamthefij/dockron)
From either an `amd64`, `arm`, or `arm64` machine, you can run Dockron using:
docker run -v /var/run/docker.sock:/var/run/docker.sock:ro iamthefij/dockron -watch
### Getting a Docker API Version error?
You might see something like the following error when Dockron connects to the Docker API
```
Error response from daemon: client version 1.47 is too new. Maximum supported API version is 1.45
```
This is because the API client library is newer than the version of the Docker API on your host. You can tell the Dockron API Client to use a compatible version by specifying `DOCKER_API_VERSION=1.45`, where the version you specify matches the API version shown when you run `docker version`. If you are running Dockron in Docker, make sure you add this to your compose environment or otherwise pass it to the container.
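As a hedged sketch of pinning the version when running the image directly (1.45 matches the error above; substitute the API version your own daemon reports):
```
# Pin the client API version so it can talk to an older Docker daemon
docker run \
  -e DOCKER_API_VERSION=1.45 \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  iamthefij/dockron -watch
```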
### Scheduling a container
First, be sure your container is something short-lived that will actually exit when complete. This is for batch runs, not for keeping a service running; Docker should be able to do that on its own with a restart policy.
Create your container and add a label in the form `'dockron.schedule=* * * * *'`, where the value is a valid cron expression (See the section [Cron Expression Formatting](#cron-expression-formatting)).
Create your container and add a label in the form `dockron.schedule="* * * * *"`, where the value is a valid cron expression (See the section [Cron Expression Formatting](#cron-expression-formatting)).
Dockron will now start that container periodically on the schedule.
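For example, a minimal sketch using `docker run` (the image, command, and container name are illustrative; the label matches the compose examples elsewhere in this diff):
```
# Dockron will start this short-lived container once per minute
docker run --name date-job --label 'dockron.schedule=* * * * *' busybox date
```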
If you have a long-running container that you'd like to schedule an exec command inside, you can do so with labels as well. Add your job in the form `dockron.<job>.schedule=* * * * *` and `dockron.<job>.command=echo hello`. Both labels are required to create an exec job.
E.g.:
labels:
- "dockron.dates.schedule=* * * * *"
- "dockron.dates.command=date"
_Note: Exec jobs will log their output to Dockron. There is also currently no way to health check these._
### Cron Expression Formatting
For more information on the cron expression parsing, see the docs for [robfig/cron](https://godoc.org/github.com/robfig/cron).
@@ -69,13 +41,3 @@ I intend to keep it simple as well. It will likely never:
* Handle job dependencies
Either use a separate tool in conjunction with Dockron, or use a more robust scheduler like Tron or Chronos.
## Building
If you have Go on your machine, you can simply use `make build` or `make run` to build and test Dockron. If you don't have Go but you do have Docker, you can still build Docker images using the provided multi-stage Dockerfile! You can kick that off with `make docker-staged-build`.
There is also an example `docker-compose.yml` that uses the multi-stage build to provide an easy sample. It can be run with `make docker-example`.
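As a usage sketch, the targets mentioned here are all defined in the Makefile earlier in this diff:
```
make build                # compile a local binary with go build
make docker-staged-build  # build an image via Dockerfile.multi-stage (no local Go needed)
make docker-example       # build and run the example docker-compose.yml
```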
## Tests
There are now some basic tests as well as linting and integration tests. You can run all of these by executing `make all`.
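Per the Makefile's `all` target earlier in this diff, that is equivalent to running the individual targets:
```
make check  # pre-commit hooks (linting)
make test   # unit tests with a coverage threshold
make itest  # integration tests via itest/itest.sh
```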

docker-compose.yml (deleted)

@@ -1,26 +0,0 @@
---
version: '3'
services:
dockron:
build:
context: .
dockerfile: ./Dockerfile.multi-stage
command: ["-watch", "10s", "-debug"]
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
start_echoer:
image: busybox:latest
command: ["date"]
labels:
# Execute every minute
- 'dockron.schedule=* * * * *'
exec_echoer:
image: busybox:latest
command: sh -c "date > /out && tail -f /out"
labels:
# Execute every minute
- 'dockron.date.schedule=* * * * *'
- 'dockron.date.command=date >> /out'

go.mod (deleted, 39 lines)

@@ -1,39 +0,0 @@
module github.com/iamthefij/dockron
go 1.22
toolchain go1.22.7
require (
git.iamthefij.com/iamthefij/slog v1.3.0
github.com/docker/docker v27.3.1+incompatible
github.com/robfig/cron/v3 v3.0.1
golang.org/x/net v0.29.0
)
require (
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
go.opentelemetry.io/otel v1.30.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect
go.opentelemetry.io/otel/metric v1.30.0 // indirect
go.opentelemetry.io/otel/sdk v1.30.0 // indirect
go.opentelemetry.io/otel/trace v1.30.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/time v0.6.0 // indirect
gotest.tools/v3 v3.5.1 // indirect
)

go.sum (deleted, 127 lines)

@@ -1,127 +0,0 @@
git.iamthefij.com/iamthefij/slog v1.3.0 h1:4Hu5PQvDrW5e3FrTS3q2iIXW0iPvhNY/9qJsqDR3K3I=
git.iamthefij.com/iamthefij/slog v1.3.0/go.mod h1:1RUj4hcCompZkAxXCRfUX786tb3cM/Zpkn97dGfUfbg=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM=
google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=

itest/docker-compose.yml (deleted)

@@ -1,32 +0,0 @@
---
version: '3'
services:
dockron:
build:
context: ../
dockerfile: ./Dockerfile.multi-stage
command: ["-watch", "10s", "-debug"]
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
DOCKER_API_VERSION: 1.45
start_echoer:
image: busybox:latest
command: sh -c "echo ok | tee -a /result.txt"
volumes:
- "./start_result.txt:/result.txt"
labels:
# Execute every minute
- 'dockron.schedule=* * * * *'
exec_echoer:
image: busybox:latest
command: sh -c "tail -f /result.txt"
volumes:
- "./exec_result.txt:/result.txt"
labels:
# Execute every minute
- 'dockron.test.schedule=* * * * *'
- 'dockron.test.command=echo ok | tee /result.txt && echo "Yay!"'

itest/itest.sh (deleted)

@@ -1,47 +0,0 @@
#! /bin/bash
set -e
# Change to itest dir
cd "$(dirname "$0")"
function check_results() {
local f=$1
local min=$2
awk "/ok/ { count=count+1 } END { print \"$f: Run count\", count; if (count < $min) { print \"Expected > $min\"; exit 1 } }" "$f"
}
function main() {
# Clear and create result files
echo "start" > ./start_result.txt
echo "start" > ./exec_result.txt
# Clean old containers
docker compose down || true
# Start containers
echo "Starting containers"
docker compose up -d --build
# Schedules run on the shortest interval of a minute. This should allow time
# for the containers to start and execute once
local seconds=$((65 - $(date +"%S")))
echo "Containers started. Sleeping for ${seconds}s to let schedules run"
sleep $seconds
echo "Stopping containers"
docker compose stop
# Print logs
docker compose logs
# Validate result shows minimum amount of executions
check_results ./start_result.txt 2
check_results ./exec_result.txt 1
# Check for exec output
if ! (docker compose logs | grep -q "Yay!"); then
echo "Exec output 'Yay!' not found"
exit 1
fi
}
main

main.go (365 lines changed)

@@ -1,373 +1,106 @@
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/robfig/cron"
"golang.org/x/net/context"
"strings"
"time"
"git.iamthefij.com/iamthefij/slog"
dockerTypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
dockerClient "github.com/docker/docker/client"
"github.com/robfig/cron/v3"
"golang.org/x/net/context"
)
var (
// defaultWatchInterval is the duration we should sleep until polling Docker
defaultWatchInterval = (1 * time.Minute)
// DefaultWatchInterval is the duration we should sleep until polling Docker
var DefaultWatchInterval = (1 * time.Minute)
// schedLabel is the string label to search for cron expressions
schedLabel = "dockron.schedule"
// execLabelRegexp will capture labels for an exec job
execLabelRegexp = regexp.MustCompile(`dockron\.([a-zA-Z0-9_-]+)\.(schedule|command)`)
// version of dockron being run
version = "dev"
)
// ContainerClient provides an interface for interacting with Docker. Makes it possible to mock in tests
type ContainerClient interface {
ContainerExecCreate(ctx context.Context, container string, config container.ExecOptions) (dockerTypes.IDResponse, error)
ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error)
ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error
ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (dockerTypes.HijackedResponse, error)
ContainerInspect(ctx context.Context, containerID string) (dockerTypes.ContainerJSON, error)
ContainerList(context context.Context, options container.ListOptions) ([]dockerTypes.Container, error)
ContainerStart(context context.Context, containerID string, options container.StartOptions) error
}
// ContainerCronJob is an interface of a job to run on containers
type ContainerCronJob interface {
Run()
Name() string
UniqueName() string
Schedule() string
}
// SchedLabel is the string label to search for cron expressions
var SchedLabel = "dockron.schedule"
// ContainerStartJob represents a scheduled container task
// It contains a reference to a client, the schedule to run on, and the
// ID of that container that should be started
type ContainerStartJob struct {
client ContainerClient
context context.Context
name string
containerID string
schedule string
Client *client.Client
ContainerID string
Context context.Context
Name string
Schedule string
}
// Run is executed based on the ContainerStartJob Schedule and starts the
// container
func (job ContainerStartJob) Run() {
slog.Infof("Starting: %s", job.name)
// Check if container is already running
containerJSON, err := job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.OnErrPanicf(err, "Could not get container details for job %s", job.name)
if containerJSON.State.Running {
slog.Warningf("%s: Container is already running. Skipping start.", job.name)
return
}
// Start job
err = job.client.ContainerStart(
job.context,
job.containerID,
container.StartOptions{},
)
slog.OnErrPanicf(err, "Could not start container for job %s", job.name)
// Check results of job
for check := true; check; check = containerJSON.State.Running {
slog.Debugf("%s: Still running", job.name)
containerJSON, err = job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.OnErrPanicf(err, "Could not get container details for job %s", job.name)
time.Sleep(1 * time.Second)
}
slog.Debugf("%s: Done running. %+v", job.name, containerJSON.State)
// Log exit code if failed
if containerJSON.State.ExitCode != 0 {
slog.Errorf(
"%s: Exec job exited with code %d",
job.name,
containerJSON.State.ExitCode,
)
}
}
// Name returns the name of the job
func (job ContainerStartJob) Name() string {
return job.name
}
// Schedule returns the schedule of the job
func (job ContainerStartJob) Schedule() string {
return job.schedule
}
// UniqueName returns a unique identifier for a container start job
func (job ContainerStartJob) UniqueName() string {
// ContainerID should be unique as a change in label will result in
// a new container as they are immutable
return job.name + "/" + job.containerID
}
// ContainerExecJob is a scheduled job to be executed in a running container
type ContainerExecJob struct {
ContainerStartJob
shellCommand string
}
// Run is executed based on the ContainerStartJob Schedule and starts the
// container
func (job ContainerExecJob) Run() {
slog.Infof("Execing: %s", job.name)
containerJSON, err := job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.OnErrPanicf(err, "Could not get container details for job %s", job.name)
if !containerJSON.State.Running {
slog.Warningf("%s: Container not running. Skipping exec.", job.name)
return
}
execID, err := job.client.ContainerExecCreate(
job.context,
job.containerID,
container.ExecOptions{
AttachStdout: true,
AttachStderr: true,
Cmd: []string{"sh", "-c", strings.TrimSpace(job.shellCommand)},
},
)
slog.OnErrPanicf(err, "Could not create container exec job for %s", job.name)
hj, err := job.client.ContainerExecAttach(job.context, execID.ID, container.ExecAttachOptions{})
slog.OnErrWarnf(err, "%s: Error attaching to exec: %s", job.name, err)
defer hj.Close()
scanner := bufio.NewScanner(hj.Reader)
err = job.client.ContainerExecStart(
job.context,
execID.ID,
container.ExecStartOptions{},
)
slog.OnErrPanicf(err, "Could not start container exec job for %s", job.name)
// Wait for job results
execInfo := container.ExecInspect{Running: true}
for execInfo.Running {
time.Sleep(1 * time.Second)
slog.Debugf("Still execing %s", job.name)
execInfo, err = job.client.ContainerExecInspect(
job.context,
execID.ID,
)
// Maybe print output
if hj.Reader != nil {
for scanner.Scan() {
line := scanner.Text()
if len(line) > 0 {
slog.Infof("%s: Exec output: %s", job.name, line)
} else {
slog.Debugf("%s: Empty exec output", job.name)
}
if err := scanner.Err(); err != nil {
slog.OnErrWarnf(err, "%s: Error reading from exec", job.name)
}
}
} else {
slog.Debugf("%s: No exec reader", job.name)
}
slog.Debugf("%s: Exec info: %+v", job.name, execInfo)
if err != nil {
// Nothing we can do if we got an error here, so let's go
slog.OnErrWarnf(err, "%s: Could not get status for exec job", job.name)
return
}
}
slog.Debugf("%s: Done execing. %+v", job.name, execInfo)
// Log exit code if failed
if execInfo.ExitCode != 0 {
slog.Errorf("%s: Exec job existed with code %d", job.name, execInfo.ExitCode)
fmt.Println("Starting:", job.Name)
err := job.Client.ContainerStart(job.Context, job.ContainerID, types.ContainerStartOptions{})
if err != nil {
panic(err)
}
}
// QueryScheduledJobs queries Docker for all containers with a schedule and
// returns a list of ContainerCronJob records to be scheduled
func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
slog.Debugf("Scanning containers for new schedules...")
containers, err := client.ContainerList(
context.Background(),
container.ListOptions{All: true},
)
slog.OnErrPanicf(err, "Failure querying docker containers")
// returns a list of ContainerStartJob records to be scheduled
func QueryScheduledJobs(cli *client.Client) (jobs []ContainerStartJob) {
fmt.Println("Scanning containers for new schedules...")
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
if err != nil {
panic(err)
}
for _, container := range containers {
// Add start job
if val, ok := container.Labels[schedLabel]; ok {
if val, ok := container.Labels[SchedLabel]; ok {
jobName := strings.Join(container.Names, "/")
jobs = append(jobs, ContainerStartJob{
client: client,
containerID: container.ID,
context: context.Background(),
schedule: val,
name: jobName,
})
}
// Add exec jobs
execJobs := map[string]map[string]string{}
for label, value := range container.Labels {
results := execLabelRegexp.FindStringSubmatch(label)
expectedLabelParts := 3
if len(results) == expectedLabelParts {
// We've got part of a new job
jobName, jobField := results[1], results[2]
if partJob, ok := execJobs[jobName]; ok {
// Partial exists, add the other value
partJob[jobField] = value
} else {
// No partial exists, add this part
execJobs[jobName] = map[string]string{
jobField: value,
}
}
}
}
for jobName, jobConfig := range execJobs {
schedule, ok := jobConfig["schedule"]
if !ok {
continue
}
shellCommand, ok := jobConfig["command"]
if !ok {
continue
}
jobs = append(jobs, ContainerExecJob{
ContainerStartJob: ContainerStartJob{
client: client,
containerID: container.ID,
context: context.Background(),
schedule: schedule,
name: strings.Join(append(container.Names, jobName), "/"),
},
shellCommand: shellCommand,
Schedule: val,
Client: cli,
ContainerID: container.ID,
Context: context.Background(),
Name: jobName,
})
}
}
return jobs
return
}
// ScheduleJobs accepts a Cron instance and a list of jobs to schedule.
// It then schedules the provided jobs
func ScheduleJobs(c *cron.Cron, jobs []ContainerCronJob) {
// Fetch existing jobs from the cron
existingJobs := map[string]cron.EntryID{}
for _, entry := range c.Entries() {
// This should be safe since ContainerCronJob is the only type of job we use
existingJobs[entry.Job.(ContainerCronJob).UniqueName()] = entry.ID
}
func ScheduleJobs(c *cron.Cron, jobs []ContainerStartJob) {
for _, job := range jobs {
if _, ok := existingJobs[job.UniqueName()]; ok {
// Job already exists, remove it from existing jobs so we don't
// unschedule it later
slog.Debugf("Job %s is already scheduled. Skipping", job.Name())
delete(existingJobs, job.UniqueName())
continue
}
// Job doesn't exist yet, schedule it
_, err := c.AddJob(job.Schedule(), job)
if err == nil {
slog.Infof(
"Scheduled %s (%s) with schedule '%s'\n",
job.Name(),
job.UniqueName(),
job.Schedule(),
)
} else {
// TODO: Track something for a healthcheck here
slog.Errorf(
"Could not schedule %s (%s) with schedule '%s'. %v\n",
job.Name(),
job.UniqueName(),
job.Schedule(),
err,
)
}
}
// Remove remaining scheduled jobs that weren't in the new list
for _, entryID := range existingJobs {
c.Remove(entryID)
fmt.Printf("Scheduling %s (%s) with schedule '%s'\n", job.Name, job.ContainerID[:10], job.Schedule)
c.AddJob(job.Schedule, job)
}
}
func main() {
// Get a Docker Client
client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
slog.OnErrPanicf(err, "Could not create Docker client")
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
// Read interval for polling Docker
var watchInterval time.Duration
showVersion := flag.Bool("version", false, "Display the version of dockron and exit")
flag.DurationVar(&watchInterval, "watch", defaultWatchInterval, "Interval used to poll Docker for changes")
flag.BoolVar(&slog.DebugLevel, "debug", false, "Show debug logs")
flag.DurationVar(&watchInterval, "watch", DefaultWatchInterval, "Interval used to poll Docker for changes")
flag.Parse()
// Print version if asked
if *showVersion {
fmt.Println("Dockron version:", version)
os.Exit(0)
}
// Create a Cron
c := cron.New()
c.Start()
// Start the loop
for {
// HACK: This is risky as it could fall on the same interval as a task and that task would get skipped
// It would be best to manage a ContainerID to Job mapping and then remove entries that are missing
// in the new list and add new entries. However, cron does not support this yet.
// Stop and create a new cron
c.Stop()
c = cron.New()
// Schedule jobs again
jobs := QueryScheduledJobs(client)
jobs := QueryScheduledJobs(cli)
ScheduleJobs(c, jobs)
c.Start()
// Sleep until the next query time
time.Sleep(watchInterval)

File diff suppressed because it is too large.