Compare commits

...

26 Commits

Author SHA1 Message Date
IamTheFij 301563c302 Update pipeline notify [drone CI: build killed] 2020-08-19 14:09:37 -07:00
IamTheFij 4cdd1eab9e Update pipeline notify [drone CI: build killed] 2020-08-19 14:08:01 -07:00
IamTheFij eac49e36b0 Remove itest [drone CI: build failing] 2020-08-19 13:59:27 -07:00
IamTheFij 3de07349ed Make sure version is set when building final binaries 2020-08-19 13:57:49 -07:00
IamTheFij c742a324ed Add bash to drone itest [drone CI: build failing] 2020-08-19 13:56:20 -07:00
IamTheFij a5406be70b More itest debugging [drone CI: build failing] 2020-08-19 13:52:36 -07:00
IamTheFij bb0db6a9d6 Debug itest [drone CI: build failing] 2020-08-19 13:51:36 -07:00
IamTheFij 0e09d2ca2c Fix hadolint install [drone CI: build failing] 2020-08-19 13:47:54 -07:00
IamTheFij c9d19f9c01 Add git to itest [drone CI: build failing] 2020-08-19 13:44:10 -07:00
    Don't actually know why this is needed
IamTheFij 7e9f2e38ca Try to add hadolint back 2020-08-19 13:42:55 -07:00
IamTheFij 996d31b9eb Change to compose base [drone CI: build failing] 2020-08-19 13:34:52 -07:00
IamTheFij e3431971a0 Try to add itest to drone build [drone CI: build failing] 2020-08-19 13:29:53 -07:00
IamTheFij 4bf79f769e Skip hadolint in drone check [drone CI: build failing] 2020-08-19 13:25:45 -07:00
IamTheFij 088bf2ab5e Switch to different python base image for checks [drone CI: build failing] 2020-08-19 13:21:12 -07:00
IamTheFij 4ef3f0b431 Remove build step as build is part of test [drone CI: build failing] 2020-08-19 13:16:50 -07:00
IamTheFij f4c7801d84 Install pre-commit for checks [drone CI: build failing] 2020-08-19 13:15:45 -07:00
IamTheFij 0b8454bb80 Add drone email notifications [drone CI: build failing] 2020-08-19 13:14:48 -07:00
IamTheFij 61171cc9eb Add tests to drone [drone CI: build failing] 2020-08-19 13:12:17 -07:00
IamTheFij a1ba2060bb Add integration test 2020-08-19 13:04:37 -07:00
IamTheFij a572bd6635 Fix multi-stage build with slog 2020-08-19 12:51:30 -07:00
IamTheFij 1fc9672971 Update tests to 75%+ coverage [drone CI: build failing] 2020-08-19 10:19:43 -07:00
IamTheFij 63532b39f8 WIP Yea tests! [drone CI: build failing] 2020-08-18 22:07:41 -07:00
IamTheFij 73b7a12eb7 Update slog interfaces [drone CI: build failing] 2020-08-17 15:34:47 -07:00
IamTheFij 32d681703c Add script to add documentation to readme 2020-08-16 16:13:25 -07:00
IamTheFij 4cb0b027cd Move logger module to a subpackage 2020-08-16 15:03:09 -07:00
    This will allow me to use my logging helpers from another project
IamTheFij 3ee32b632a Add the ability to create execute jobs on long running containers 2020-08-16 14:46:35 -07:00
13 changed files with 1165 additions and 162 deletions

.drone.yml

@@ -1,11 +1,28 @@
---
kind: pipeline
name: test
steps:
- name: build
image: golang:1.12
- name: test
image: golang:1.15
commands:
- make build
- make test
- name: check
image: python:3
commands:
- pip install docker-compose pre-commit
- wget -L -O /usr/bin/hadolint https://github.com/hadolint/hadolint/releases/download/v1.18.0/hadolint-Linux-x86_64
- chmod +x /usr/bin/hadolint
- make check
# - name: itest
# image: docker/compose:alpine-1.26.2
# environment:
# VERSION: ${DRONE_TAG:-${DRONE_COMMIT}}
# commands:
# - apk add make bash
# - make itest
---
kind: pipeline
@@ -24,7 +41,9 @@ trigger:
steps:
- name: build
image: golang:1.12
image: golang:1.15
environment:
VERSION: ${DRONE_TAG:-${DRONE_COMMIT}}
commands:
- make build-linux-static
@@ -77,3 +96,28 @@ steps:
from_secret: docker_username
password:
from_secret: docker_password
---
kind: pipeline
name: notify
depends_on:
- test
- publish
trigger:
status:
- failure
steps:
- name: notify
image: drillster/drone-email
settings:
host:
from_secret: SMTP_HOST # pragma: whitelist secret
username:
from_secret: SMTP_USER # pragma: whitelist secret
password:
from_secret: SMTP_PASS # pragma: whitelist secret
from: drone@iamthefij.com

.gitignore (vendored)

@@ -30,3 +30,6 @@ dockron
dockron-*
# deps
vendor/
# Test output
itest/*_result.txt

.pre-commit-config.yaml

@@ -18,4 +18,4 @@ repos:
rev: v2.0.0
hooks:
- id: docker-compose-check
- id: hadolint
- id: hadolint-system

Dockerfile.multi-stage

@@ -1,5 +1,5 @@
ARG REPO=library
FROM golang:1.12-alpine AS builder
FROM golang:1.15-alpine AS builder
# hadolint ignore=DL3018
RUN apk add --no-cache git
@@ -11,6 +11,7 @@ COPY ./go.mod ./go.sum /app/
RUN go mod download
COPY ./main.go /app/
COPY ./slog /app/slog
ARG ARCH=amd64
ARG VERSION=dev

Makefile

@@ -1,4 +1,3 @@
.PHONY: test all
DOCKER_TAG ?= dockron-dev-${USER}
GIT_TAG_NAME := $(shell git tag -l --contains HEAD)
GIT_SHA := $(shell git rev-parse HEAD)
@@ -9,6 +8,9 @@ GOFILES = *.go go.mod go.sum
.PHONY: default
default: build
.PHONY: all
all: check test itest
# Downloads dependencies into vendor directory
vendor: $(GOFILES)
go mod vendor
@@ -22,9 +24,13 @@ run:
test:
go test -coverprofile=coverage.out
go tool cover -func=coverage.out
# @go tool cover -func=coverage.out | awk -v target=80.0% \
@go tool cover -func=coverage.out | awk -v target=75.0% \
'/^total:/ { print "Total coverage: " $$3 " Minimum coverage: " target; if ($$3+0.0 >= target+0.0) print "ok"; else { print "fail"; exit 1; } }'
.PHONY: itest
itest:
./itest/itest.sh
# Installs pre-commit hooks
.PHONY: install-hooks
install-hooks:

docker-compose.yml

@@ -10,9 +10,17 @@ services:
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
echoer:
start_echoer:
image: busybox:latest
command: ["date"]
labels:
# Execute every minute
- 'dockron.schedule=* * * * *'
exec_echoer:
image: busybox:latest
command: sh -c "date > /out && tail -f /out"
labels:
# Execute every minute
- 'dockron.date.schedule=* * * * *'
- 'dockron.date.command=date >> /out'

itest/docker-compose.yml (new file)

@@ -0,0 +1,30 @@
---
version: '3'
services:
dockron:
build:
context: ../
dockerfile: ./Dockerfile.multi-stage
command: ["-watch", "10s", "-debug"]
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
start_echoer:
image: busybox:latest
command: sh -c "echo ok | tee -a /result.txt"
volumes:
- "./start_result.txt:/result.txt"
labels:
# Execute every minute
- 'dockron.schedule=* * * * *'
exec_echoer:
image: busybox:latest
command: sh -c "tail -f /result.txt"
volumes:
- "./exec_result.txt:/result.txt"
labels:
# Execute every minute
- 'dockron.test.schedule=* * * * *'
- 'dockron.test.command=echo ok >> /result.txt'

itest/itest.sh (new executable file)

@@ -0,0 +1,36 @@
#! /bin/bash
set -e
# Change to itest dir
cd "$(dirname "$0")"
function check_results() {
local f=$1
local min=$2
awk "/ok/ { count=count+1 } END { print \"$f: Run count\", count; if (count < $min) { print \"Expected at least $min\"; exit 1 } }" "$f"
}
function main() {
# Clear and create result files
echo "start" > ./start_result.txt
echo "start" > ./exec_result.txt
# Clean old containers
docker-compose down || true
# Start containers
echo "Starting containers"
docker-compose up -d --build
echo "Containers started. Sleeping for 70s to let schedules run"
# Schedules run on the shortest interval of a minute. This should allow time
# for the containers to start and execute once
sleep 70
echo "Stopping containers"
docker-compose stop
# Validate result shows minimum amount of executions
check_results ./start_result.txt 2
check_results ./exec_result.txt 1
}
main

main.go

@@ -3,13 +3,14 @@ package main
import (
"flag"
"fmt"
"log"
"os"
"regexp"
"strings"
"time"
dockerTypes "github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/iamthefij/dockron/slog"
"github.com/robfig/cron/v3"
"golang.org/x/net/context"
)
@@ -20,75 +21,228 @@
// schedLabel is the string label to search for cron expressions
schedLabel = "dockron.schedule"
// execLabelRegexp will capture labels for an exec job
execLabelRegexp = regexp.MustCompile(`dockron\.([a-zA-Z0-9_-]+)\.(schedule|command)`)
// version of dockron being run
version = "dev"
// Debug can be used to show or suppress certain log messages
Debug = true
)
// ContainerClient provides an interface for interacting with Docker
type ContainerClient interface {
ContainerStart(context context.Context, containerID string, options dockerTypes.ContainerStartOptions) error
ContainerExecCreate(ctx context.Context, container string, config dockerTypes.ExecConfig) (dockerTypes.IDResponse, error)
ContainerExecInspect(ctx context.Context, execID string) (dockerTypes.ContainerExecInspect, error)
ContainerExecStart(ctx context.Context, execID string, config dockerTypes.ExecStartCheck) error
ContainerInspect(ctx context.Context, containerID string) (dockerTypes.ContainerJSON, error)
ContainerList(context context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error)
ContainerStart(context context.Context, containerID string, options dockerTypes.ContainerStartOptions) error
}
// ContainerCronJob is an interface of a job to run on containers
type ContainerCronJob interface {
Run()
Name() string
UniqueName() string
Schedule() string
}
// ContainerStartJob represents a scheduled container task
// It contains a reference to a client, the schedule to run on, and the
// ID of that container that should be started
type ContainerStartJob struct {
Client ContainerClient
ContainerID string
Context context.Context
Name string
Schedule string
client ContainerClient
context context.Context
name string
containerID string
schedule string
}
// Run is executed based on the ContainerStartJob Schedule and starts the
// container
func (job ContainerStartJob) Run() {
log.Println("Starting:", job.Name)
err := job.Client.ContainerStart(
job.Context,
job.ContainerID,
slog.Info("Starting: %s", job.name)
// Check if container is already running
containerJSON, err := job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.PanicOnErr(err, "Could not get container details for job %s", job.name)
if containerJSON.State.Running {
slog.Warning("Container is already running. Skipping %s", job.name)
return
}
// Start job
err = job.client.ContainerStart(
job.context,
job.containerID,
dockerTypes.ContainerStartOptions{},
)
if err != nil {
panic(err)
slog.PanicOnErr(err, "Could not start container for job %s", job.name)
// Check results of job
for check := true; check; check = containerJSON.State.Running {
slog.Debug("Still running %s", job.name)
containerJSON, err = job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.PanicOnErr(err, "Could not get container details for job %s", job.name)
time.Sleep(1 * time.Second)
}
slog.Debug("Done running %s. %+v", job.name, containerJSON.State)
// Log exit code if failed
if containerJSON.State.ExitCode != 0 {
slog.Error(
"Start job %s exited with code %d",
job.name,
containerJSON.State.ExitCode,
)
}
}
// Name returns the name of the job
func (job ContainerStartJob) Name() string {
return job.name
}
// Schedule returns the schedule of the job
func (job ContainerStartJob) Schedule() string {
return job.schedule
}
// UniqueName returns a unique identifier for a container start job
func (job ContainerStartJob) UniqueName() string {
// ContainerID should be unique as a change in label will result in
// a new container as they are immutable
return job.ContainerID
return job.name + "/" + job.containerID
}
// ContainerExecJob is a scheduled job to be executed in a running container
type ContainerExecJob struct {
ContainerStartJob
shellCommand string
}
// Run is executed based on the ContainerExecJob Schedule and runs the
// shell command in the associated running container
func (job ContainerExecJob) Run() {
slog.Info("Execing: %s", job.name)
containerJSON, err := job.client.ContainerInspect(
job.context,
job.containerID,
)
slog.PanicOnErr(err, "Could not get container details for job %s", job.name)
if !containerJSON.State.Running {
slog.Warning("Container not running. Skipping %s", job.name)
return
}
execID, err := job.client.ContainerExecCreate(
job.context,
job.containerID,
dockerTypes.ExecConfig{
Cmd: []string{"sh", "-c", strings.TrimSpace(job.shellCommand)},
},
)
slog.PanicOnErr(err, "Could not create container exec job for %s", job.name)
err = job.client.ContainerExecStart(
job.context,
execID.ID,
dockerTypes.ExecStartCheck{},
)
slog.PanicOnErr(err, "Could not start container exec job for %s", job.name)
// Wait for job results
execInfo := dockerTypes.ContainerExecInspect{Running: true}
for execInfo.Running {
slog.Debug("Still execing %s", job.name)
execInfo, err = job.client.ContainerExecInspect(
job.context,
execID.ID,
)
slog.Debug("Exec info: %+v", execInfo)
if err != nil {
// Nothing we can do if we got an error here, so let's go
slog.WarnOnErr(err, "Could not get status for exec job %s", job.name)
return
}
time.Sleep(1 * time.Second)
}
slog.Debug("Done execing %s. %+v", job.name, execInfo)
// Log exit code if failed
if execInfo.ExitCode != 0 {
slog.Error("Exec job %s exited with code %d", job.name, execInfo.ExitCode)
}
}
// QueryScheduledJobs queries Docker for all containers with a schedule and
// returns a list of ContainerStartJob records to be scheduled
func QueryScheduledJobs(client ContainerClient) (jobs []ContainerStartJob) {
if Debug {
log.Println("Scanning containers for new schedules...")
}
// returns a list of ContainerCronJob records to be scheduled
func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
slog.Debug("Scanning containers for new schedules...")
containers, err := client.ContainerList(
context.Background(),
dockerTypes.ContainerListOptions{All: true},
)
if err != nil {
panic(err)
}
slog.PanicOnErr(err, "Failure querying docker containers")
for _, container := range containers {
// Add start job
if val, ok := container.Labels[schedLabel]; ok {
jobName := strings.Join(container.Names, "/")
jobs = append(jobs, ContainerStartJob{
Schedule: val,
Client: client,
ContainerID: container.ID,
Context: context.Background(),
Name: jobName,
client: client,
containerID: container.ID,
context: context.Background(),
schedule: val,
name: jobName,
})
}
// Add exec jobs
execJobs := map[string]map[string]string{}
for label, value := range container.Labels {
results := execLabelRegexp.FindStringSubmatch(label)
if len(results) == 3 {
// We've got part of a new job
jobName, jobField := results[1], results[2]
if partJob, ok := execJobs[jobName]; ok {
// Partial exists, add the other value
partJob[jobField] = value
} else {
// No partial exists, add this part
execJobs[jobName] = map[string]string{
jobField: value,
}
}
}
}
for jobName, jobConfig := range execJobs {
schedule, ok := jobConfig["schedule"]
if !ok {
continue
}
shellCommand, ok := jobConfig["command"]
if !ok {
continue
}
jobs = append(jobs, ContainerExecJob{
ContainerStartJob: ContainerStartJob{
client: client,
containerID: container.ID,
context: context.Background(),
schedule: schedule,
name: strings.Join(append(container.Names, jobName), "/"),
},
shellCommand: shellCommand,
})
}
}
@@ -98,40 +252,39 @@ func QueryScheduledJobs(client ContainerClient) (jobs []ContainerStartJob) {
// ScheduleJobs accepts a Cron instance and a list of jobs to schedule.
// It then schedules the provided jobs
func ScheduleJobs(c *cron.Cron, jobs []ContainerStartJob) {
func ScheduleJobs(c *cron.Cron, jobs []ContainerCronJob) {
// Fetch existing jobs from the cron
existingJobs := map[string]cron.EntryID{}
for _, entry := range c.Entries() {
existingJobs[entry.Job.(ContainerStartJob).UniqueName()] = entry.ID
// This should be safe since ContainerCronJob is the only type of job we use
existingJobs[entry.Job.(ContainerCronJob).UniqueName()] = entry.ID
}
for _, job := range jobs {
if _, ok := existingJobs[job.UniqueName()]; ok {
// Job already exists, remove it from existing jobs so we don't
// unschedule it later
if Debug {
log.Printf("Job %s is already scheduled. Skipping", job.Name)
}
slog.Debug("Job %s is already scheduled. Skipping", job.Name())
delete(existingJobs, job.UniqueName())
continue
}
// Job doesn't exist yet, schedule it
_, err := c.AddJob(job.Schedule, job)
_, err := c.AddJob(job.Schedule(), job)
if err == nil {
log.Printf(
slog.Info(
"Scheduled %s (%s) with schedule '%s'\n",
job.Name,
job.ContainerID[:10],
job.Schedule,
job.Name(),
job.UniqueName(),
job.Schedule(),
)
} else {
// TODO: Track something for a healthcheck here
log.Printf(
"Error scheduling %s (%s) with schedule '%s'. %v\n",
job.Name,
job.ContainerID[:10],
job.Schedule,
slog.Error(
"Could not schedule %s (%s) with schedule '%s'. %v\n",
job.Name(),
job.UniqueName(),
job.Schedule(),
err,
)
}
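
An aside, not part of the diff: the reconciliation above leans on a few robfig/cron v3 calls (AddJob, Entries, Remove). A minimal sketch of just those calls, using a hypothetical printJob type in place of ContainerCronJob:

    package main

    import (
        "fmt"

        "github.com/robfig/cron/v3"
    )

    // printJob satisfies cron.Job by defining Run()
    type printJob struct{ name string }

    func (j printJob) Run() { fmt.Println("running", j.name) }

    func main() {
        c := cron.New()

        // AddJob returns an EntryID that can be used to unschedule later
        id, err := c.AddJob("* * * * *", printJob{name: "demo"})
        if err != nil {
            panic(err)
        }

        // Entries exposes scheduled jobs; ScheduleJobs asserts entry.Job
        // back to its own job type to rebuild the UniqueName index
        for _, entry := range c.Entries() {
            fmt.Println("scheduled:", entry.Job.(printJob).name)
        }

        // Jobs that no longer exist in Docker would be removed like this
        c.Remove(id)
    }
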
@@ -154,7 +307,7 @@ func main() {
var watchInterval time.Duration
flag.DurationVar(&watchInterval, "watch", defaultWatchInterval, "Interval used to poll Docker for changes")
var showVersion = flag.Bool("version", false, "Display the version of dockron and exit")
flag.BoolVar(&Debug, "debug", false, "Show debug logs")
flag.BoolVar(&slog.DebugLevel, "debug", false, "Show debug logs")
flag.Parse()
// Print version if asked
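
Another aside: the dockron.<name>.schedule / dockron.<name>.command label convention added above only produces an exec job when both halves are present. A self-contained sketch of that pairing logic, with hypothetical label values:

    package main

    import (
        "fmt"
        "regexp"
    )

    var execLabelRegexp = regexp.MustCompile(`dockron\.([a-zA-Z0-9_-]+)\.(schedule|command)`)

    func main() {
        labels := map[string]string{
            "dockron.schedule":      "* * * * *", // start job label; no capture groups, so skipped here
            "dockron.test.schedule": "* * * * *", // half of an exec job
            "dockron.test.command":  "date",      // the other half
        }

        execJobs := map[string]map[string]string{}
        for label, value := range labels {
            if results := execLabelRegexp.FindStringSubmatch(label); len(results) == 3 {
                jobName, jobField := results[1], results[2]
                if execJobs[jobName] == nil {
                    execJobs[jobName] = map[string]string{}
                }
                execJobs[jobName][jobField] = value
            }
        }

        // Only complete schedule+command pairs become schedulable exec jobs
        for name, cfg := range execJobs {
            if cfg["schedule"] != "" && cfg["command"] != "" {
                fmt.Printf("job %s: run %q on schedule %q\n", name, cfg["command"], cfg["schedule"])
            }
        }
    }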

main_test.go

@@ -3,6 +3,8 @@ package main
import (
"fmt"
"log"
"reflect"
"sort"
"testing"
dockerTypes "github.com/docker/docker/api/types"
@@ -10,28 +12,129 @@ import (
"golang.org/x/net/context"
)
var (
// ContainerJSON results for a running container
runningContainerInfo = dockerTypes.ContainerJSON{
ContainerJSONBase: &dockerTypes.ContainerJSONBase{
State: &dockerTypes.ContainerState{
Running: true,
},
},
}
// ContainerJSON results for a stopped container
stoppedContainerInfo = dockerTypes.ContainerJSON{
ContainerJSONBase: &dockerTypes.ContainerJSONBase{
State: &dockerTypes.ContainerState{
Running: false,
},
},
}
)
// FakeCall represents a faked method call
type FakeCall []interface{}
// FakeResult gives results of a fake method
type FakeResult []interface{}
// FakeDockerClient is used to test without interacting with Docker
type FakeDockerClient struct {
FakeContainers []dockerTypes.Container
FakeResults map[string][]FakeResult
FakeCalls map[string][]FakeCall
}
// ContainerStart pretends to start a container
func (fakeClient *FakeDockerClient) ContainerStart(context context.Context, containerID string, options dockerTypes.ContainerStartOptions) error {
return nil
// AssertFakeCalls checks expected against actual calls to fake methods
func (fakeClient FakeDockerClient) AssertFakeCalls(t *testing.T, expectedCalls map[string][]FakeCall, message string) {
if !reflect.DeepEqual(fakeClient.FakeCalls, expectedCalls) {
t.Errorf(
"%s: Expected and actual calls do not match. Expected %+v Actual %+v",
message,
expectedCalls,
fakeClient.FakeCalls,
)
}
}
func (fakeClient *FakeDockerClient) ContainerList(context context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) {
return fakeClient.FakeContainers, nil
// called is a helper method to get return values and log the method call
func (fakeClient *FakeDockerClient) called(method string, v ...interface{}) FakeResult {
if fakeClient.FakeCalls == nil {
fakeClient.FakeCalls = map[string][]FakeCall{}
}
// Log method call
fakeClient.FakeCalls[method] = append(fakeClient.FakeCalls[method], v)
// Get fake results
results := fakeClient.FakeResults[method][0]
// Remove fake result
fakeClient.FakeResults[method] = fakeClient.FakeResults[method][1:]
// Return fake results
return results
}
func (fakeClient *FakeDockerClient) ContainerStart(context context.Context, containerID string, options dockerTypes.ContainerStartOptions) (e error) {
results := fakeClient.called("ContainerStart", context, containerID, options)
if results[0] != nil {
e = results[0].(error)
}
return
}
func (fakeClient *FakeDockerClient) ContainerList(context context.Context, options dockerTypes.ContainerListOptions) (c []dockerTypes.Container, e error) {
results := fakeClient.called("ContainerList", context, options)
if results[0] != nil {
c = results[0].([]dockerTypes.Container)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
func (fakeClient *FakeDockerClient) ContainerExecCreate(ctx context.Context, container string, config dockerTypes.ExecConfig) (r dockerTypes.IDResponse, e error) {
results := fakeClient.called("ContainerExecCreate", ctx, container, config)
if results[0] != nil {
r = results[0].(dockerTypes.IDResponse)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
func (fakeClient *FakeDockerClient) ContainerExecStart(ctx context.Context, execID string, config dockerTypes.ExecStartCheck) (e error) {
results := fakeClient.called("ContainerExecStart", ctx, execID, config)
if results[0] != nil {
e = results[0].(error)
}
return
}
func (fakeClient *FakeDockerClient) ContainerExecInspect(ctx context.Context, execID string) (r dockerTypes.ContainerExecInspect, e error) {
results := fakeClient.called("ContainerExecInspect", ctx, execID)
if results[0] != nil {
r = results[0].(dockerTypes.ContainerExecInspect)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
func (fakeClient *FakeDockerClient) ContainerInspect(ctx context.Context, containerID string) (r dockerTypes.ContainerJSON, e error) {
results := fakeClient.called("ContainerInspect", ctx, containerID)
if results[0] != nil {
r = results[0].(dockerTypes.ContainerJSON)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
// NewFakeDockerClient creates an empty client
func NewFakeDockerClient() *FakeDockerClient {
return &FakeDockerClient{}
}
// NewFakeDockerClientWithContainers creates a client with the provided containers
func NewFakeDockerClientWithContainers(containers []dockerTypes.Container) *FakeDockerClient {
return &FakeDockerClient{FakeContainers: containers}
return &FakeDockerClient{
FakeResults: map[string][]FakeResult{},
}
}
// ErrorUnequal checks that two values are equal and fails the test if not
@@ -49,12 +152,12 @@ func TestQueryScheduledJobs(t *testing.T) {
cases := []struct {
name string
fakeContainers []dockerTypes.Container
expectedJobs []ContainerStartJob
expectedJobs []ContainerCronJob
}{
{
name: "No containers",
fakeContainers: []dockerTypes.Container{},
expectedJobs: []ContainerStartJob{},
expectedJobs: []ContainerCronJob{},
},
{
name: "One container without schedule",
@@ -64,7 +167,7 @@ func TestQueryScheduledJobs(t *testing.T) {
ID: "no_schedule_1",
},
},
expectedJobs: []ContainerStartJob{},
expectedJobs: []ContainerCronJob{},
},
{
name: "One container with schedule",
@@ -77,13 +180,13 @@ func TestQueryScheduledJobs(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
@@ -102,13 +205,101 @@ func TestQueryScheduledJobs(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
{
name: "Incomplete exec job, schedule only",
fakeContainers: []dockerTypes.Container{
dockerTypes.Container{
Names: []string{"exec_job_1"},
ID: "exec_job_1",
Labels: map[string]string{
"dockron.test.schedule": "* * * * *",
},
},
},
expectedJobs: []ContainerCronJob{},
},
{
name: "Incomplete exec job, command only",
fakeContainers: []dockerTypes.Container{
dockerTypes.Container{
Names: []string{"exec_job_1"},
ID: "exec_job_1",
Labels: map[string]string{
"dockron.test.command": "date",
},
},
},
expectedJobs: []ContainerCronJob{},
},
{
name: "Complete exec job",
fakeContainers: []dockerTypes.Container{
dockerTypes.Container{
Names: []string{"exec_job_1"},
ID: "exec_job_1",
Labels: map[string]string{
"dockron.test.schedule": "* * * * *",
"dockron.test.command": "date",
},
},
},
expectedJobs: []ContainerCronJob{
ContainerExecJob{
ContainerStartJob: ContainerStartJob{
name: "exec_job_1/test",
containerID: "exec_job_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
shellCommand: "date",
},
},
},
{
name: "Dual exec jobs on single container",
fakeContainers: []dockerTypes.Container{
dockerTypes.Container{
Names: []string{"exec_job_1"},
ID: "exec_job_1",
Labels: map[string]string{
"dockron.test1.schedule": "* * * * *",
"dockron.test1.command": "date",
"dockron.test2.schedule": "* * * * *",
"dockron.test2.command": "date",
},
},
},
expectedJobs: []ContainerCronJob{
ContainerExecJob{
ContainerStartJob: ContainerStartJob{
name: "exec_job_1/test1",
containerID: "exec_job_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
shellCommand: "date",
},
ContainerExecJob{
ContainerStartJob: ContainerStartJob{
name: "exec_job_1/test2",
containerID: "exec_job_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
shellCommand: "date",
},
},
},
@@ -120,12 +311,17 @@ func TestQueryScheduledJobs(t *testing.T) {
// Load fake containers
t.Logf("Fake containers: %+v", c.fakeContainers)
client.FakeContainers = c.fakeContainers
client.FakeResults["ContainerList"] = []FakeResult{
FakeResult{c.fakeContainers, nil},
}
jobs := QueryScheduledJobs(client)
// Sort so we can compare each list of jobs
sort.Slice(jobs, func(i, j int) bool {
return jobs[i].UniqueName() < jobs[j].UniqueName()
})
t.Logf("Expected jobs: %+v, Actual jobs: %+v", c.expectedJobs, jobs)
ErrorUnequal(t, len(c.expectedJobs), len(jobs), "Job lengths don't match")
for i, job := range jobs {
ErrorUnequal(t, c.expectedJobs[i], job, "Job value does not match")
@@ -142,82 +338,82 @@ func TestScheduleJobs(t *testing.T) {
// Tests must be executed sequentially!
cases := []struct {
name string
queriedJobs []ContainerStartJob
expectedJobs []ContainerStartJob
queriedJobs []ContainerCronJob
expectedJobs []ContainerCronJob
}{
{
name: "No containers",
queriedJobs: []ContainerStartJob{},
expectedJobs: []ContainerStartJob{},
queriedJobs: []ContainerCronJob{},
expectedJobs: []ContainerCronJob{},
},
{
name: "One container with schedule",
queriedJobs: []ContainerStartJob{
queriedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
},
},
},
{
name: "Add a second job",
queriedJobs: []ContainerStartJob{
queriedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
},
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
},
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
},
},
},
{
name: "Replace job 1",
queriedJobs: []ContainerStartJob{
queriedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1_prime",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1_prime",
schedule: "* * * * *",
},
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
},
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1_prime",
Schedule: "* * * * *",
name: "has_schedule_1",
containerID: "has_schedule_1_prime",
schedule: "* * * * *",
},
},
},
@@ -253,12 +449,12 @@ func TestDoLoop(t *testing.T) {
cases := []struct {
name string
fakeContainers []dockerTypes.Container
expectedJobs []ContainerStartJob
expectedJobs []ContainerCronJob
}{
{
name: "No containers",
fakeContainers: []dockerTypes.Container{},
expectedJobs: []ContainerStartJob{},
expectedJobs: []ContainerCronJob{},
},
{
name: "One container without schedule",
@@ -268,7 +464,7 @@ func TestDoLoop(t *testing.T) {
ID: "no_schedule_1",
},
},
expectedJobs: []ContainerStartJob{},
expectedJobs: []ContainerCronJob{},
},
{
name: "One container with schedule",
@@ -281,13 +477,13 @@ func TestDoLoop(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
@@ -306,13 +502,13 @@ func TestDoLoop(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
@@ -334,20 +530,20 @@ func TestDoLoop(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
@@ -369,20 +565,53 @@ func TestDoLoop(t *testing.T) {
},
},
},
expectedJobs: []ContainerStartJob{
expectedJobs: []ContainerCronJob{
ContainerStartJob{
Name: "has_schedule_2",
ContainerID: "has_schedule_2",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_2",
containerID: "has_schedule_2",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
ContainerStartJob{
Name: "has_schedule_1",
ContainerID: "has_schedule_1_prime",
Schedule: "* * * * *",
Context: context.Background(),
Client: client,
name: "has_schedule_1",
containerID: "has_schedule_1_prime",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
},
},
{
name: "Remove second container and add exec to first",
fakeContainers: []dockerTypes.Container{
dockerTypes.Container{
Names: []string{"has_schedule_1"},
ID: "has_schedule_1_prime",
Labels: map[string]string{
"dockron.schedule": "* * * * *",
"dockron.test.schedule": "* * * * *",
"dockron.test.command": "date",
},
},
},
expectedJobs: []ContainerCronJob{
ContainerStartJob{
name: "has_schedule_1",
containerID: "has_schedule_1_prime",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
ContainerExecJob{
ContainerStartJob: ContainerStartJob{
name: "has_schedule_1/test",
containerID: "has_schedule_1_prime",
schedule: "* * * * *",
context: context.Background(),
client: client,
},
shellCommand: "date",
},
},
},
@@ -394,7 +623,9 @@ func TestDoLoop(t *testing.T) {
// Load fake containers
t.Logf("Fake containers: %+v", c.fakeContainers)
client.FakeContainers = c.fakeContainers
client.FakeResults["ContainerList"] = []FakeResult{
FakeResult{c.fakeContainers, nil},
}
// Execute loop iteration loop
jobs := QueryScheduledJobs(client)
@@ -415,3 +646,342 @@ func TestDoLoop(t *testing.T) {
// Make sure the cron stops
croner.Stop()
}
// TestRunExecJobs does some verification on handling of exec jobs
// These tests aren't great because there are no return values to check,
// but some test is better than no test! In the future these could be moved
// to a subpackage that offers a single function for interfacing with the
// Docker client to start or exec a container so that Dockron needn't care.
func TestRunExecJobs(t *testing.T) {
var jobContext context.Context
jobContainerID := "container_id"
jobCommand := "true"
cases := []struct {
name string
client *FakeDockerClient
expectPanic bool
expectedCalls map[string][]FakeCall
}{
{
name: "Initial inspect call raises error",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{nil, fmt.Errorf("error")},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
},
expectPanic: true,
},
{
name: "Handle container not running",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{stoppedContainerInfo, nil},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
},
},
{
name: "Handle error creating exec",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{runningContainerInfo, nil},
},
"ContainerExecCreate": []FakeResult{
FakeResult{nil, fmt.Errorf("fail")},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
"ContainerExecCreate": []FakeCall{
FakeCall{
jobContext,
jobContainerID,
dockerTypes.ExecConfig{
Cmd: []string{"sh", "-c", jobCommand},
},
},
},
},
expectPanic: true,
},
{
name: "Fail starting exec container",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{runningContainerInfo, nil},
},
"ContainerExecCreate": []FakeResult{
FakeResult{dockerTypes.IDResponse{ID: "id"}, nil},
},
"ContainerExecStart": []FakeResult{
FakeResult{fmt.Errorf("fail")},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
"ContainerExecCreate": []FakeCall{
FakeCall{
jobContext,
jobContainerID,
dockerTypes.ExecConfig{
Cmd: []string{"sh", "-c", jobCommand},
},
},
},
"ContainerExecStart": []FakeCall{
FakeCall{jobContext, "id", dockerTypes.ExecStartCheck{}},
},
},
expectPanic: true,
},
{
name: "Successfully start an exec job, fail on status",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{runningContainerInfo, nil},
},
"ContainerExecCreate": []FakeResult{
FakeResult{dockerTypes.IDResponse{ID: "id"}, nil},
},
"ContainerExecStart": []FakeResult{
FakeResult{nil},
},
"ContainerExecInspect": []FakeResult{
FakeResult{nil, fmt.Errorf("fail")},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
"ContainerExecCreate": []FakeCall{
FakeCall{
jobContext,
jobContainerID,
dockerTypes.ExecConfig{
Cmd: []string{"sh", "-c", jobCommand},
},
},
},
"ContainerExecStart": []FakeCall{
FakeCall{jobContext, "id", dockerTypes.ExecStartCheck{}},
},
"ContainerExecInspect": []FakeCall{
FakeCall{jobContext, "id"},
},
},
},
{
name: "Successfully start an exec job and run to completion",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{runningContainerInfo, nil},
},
"ContainerExecCreate": []FakeResult{
FakeResult{dockerTypes.IDResponse{ID: "id"}, nil},
},
"ContainerExecStart": []FakeResult{
FakeResult{nil},
},
"ContainerExecInspect": []FakeResult{
FakeResult{dockerTypes.ContainerExecInspect{Running: true}, nil},
FakeResult{dockerTypes.ContainerExecInspect{Running: false}, nil},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
"ContainerExecCreate": []FakeCall{
FakeCall{
jobContext,
jobContainerID,
dockerTypes.ExecConfig{
Cmd: []string{"sh", "-c", jobCommand},
},
},
},
"ContainerExecStart": []FakeCall{
FakeCall{jobContext, "id", dockerTypes.ExecStartCheck{}},
},
"ContainerExecInspect": []FakeCall{
FakeCall{jobContext, "id"},
FakeCall{jobContext, "id"},
},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
log.Printf("Running %s", t.Name())
// Create test job
job := ContainerExecJob{
ContainerStartJob: ContainerStartJob{
name: "test_job",
context: jobContext,
client: c.client,
containerID: jobContainerID,
},
shellCommand: jobCommand,
}
defer func() {
// Recover from panics, if there were any
if err := recover(); err != nil {
t.Log("Recovered from panic")
t.Log(err)
}
c.client.AssertFakeCalls(t, c.expectedCalls, "Failed")
}()
job.Run()
if c.expectPanic {
t.Errorf("Expected panic but got none")
}
})
}
}
// TestRunStartJobs does some verification on handling of start jobs
// These tests aren't great because there are no return values to check,
// but some test is better than no test! In the future these could be moved
// to a subpackage that offers a single function for interfacing with the
// Docker client to start or exec a container so that Dockron needn't care.
func TestRunStartJobs(t *testing.T) {
var jobContext context.Context
jobContainerID := "container_id"
cases := []struct {
name string
client *FakeDockerClient
expectPanic bool
expectedCalls map[string][]FakeCall
}{
{
name: "Initial inspect call raises error",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{nil, fmt.Errorf("error")},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
},
expectPanic: true,
},
{
name: "Handle container already running",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{runningContainerInfo, nil},
},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
},
},
{
name: "Handle error starting container",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{stoppedContainerInfo, nil},
},
"ContainerStart": []FakeResult{FakeResult{fmt.Errorf("fail")}},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
},
"ContainerStart": []FakeCall{
FakeCall{jobContext, jobContainerID, dockerTypes.ContainerStartOptions{}},
},
},
},
{
name: "Successfully start a container",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
FakeResult{stoppedContainerInfo, nil},
FakeResult{runningContainerInfo, nil},
FakeResult{stoppedContainerInfo, nil},
},
"ContainerStart": []FakeResult{FakeResult{nil}},
},
},
expectedCalls: map[string][]FakeCall{
"ContainerInspect": []FakeCall{
FakeCall{jobContext, jobContainerID},
FakeCall{jobContext, jobContainerID},
FakeCall{jobContext, jobContainerID},
},
"ContainerStart": []FakeCall{
FakeCall{jobContext, jobContainerID, dockerTypes.ContainerStartOptions{}},
},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
log.Printf("Running %s", t.Name())
// Create test job
job := ContainerStartJob{
name: "test_job",
context: jobContext,
client: c.client,
containerID: jobContainerID,
}
defer func() {
// Recover from panics, if there were any
recover()
c.client.AssertFakeCalls(t, c.expectedCalls, "Failed")
}()
job.Run()
if c.expectPanic {
t.Errorf("Expected panic but got none")
}
})
}
}
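
A quick illustration, not part of the change: the fake-client pattern above works by queueing canned results per method in FakeResults, exercising the code under test, then asserting the recorded FakeCalls. The container ID below is hypothetical:

    func TestFakeClientSketch(t *testing.T) {
        client := &FakeDockerClient{
            FakeResults: map[string][]FakeResult{
                // Queue one result: inspect reports a running container
                "ContainerInspect": {FakeResult{runningContainerInfo, nil}},
            },
        }

        info, err := client.ContainerInspect(context.Background(), "some_id")
        if err != nil || !info.State.Running {
            t.Error("expected a running container and no error")
        }

        // called() recorded the invocation, so expectations can be verified
        client.AssertFakeCalls(t, map[string][]FakeCall{
            "ContainerInspect": {FakeCall{context.Background(), "some_id"}},
        }, "sketch")
    }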

slog/Readme.md (new file)

@@ -0,0 +1,54 @@
# slog
A super simple go logger
I know there are many Go loggers out there that offer features such as file rotation, granular verbosity settings, colored and JSON output, etc.
_Slog is not one of them._
Slog lets you show or hide debug logs and provides a simple way to log messages with Warning and Error prefixes for consistency.
Also provided are a few simple methods for handling returned `error` variables, logging them and optionally panicking or fatally exiting.
## Documentation
package slog // import "github.com/iamthefij/dockron/slog"
Package slog is a super simple logger that allows a few convenience methods
for handling debug vs warning/error logs. It also adds a few conveniences
for handling errors.
VARIABLES
var (
// DebugLevel indicates if we should log at the debug level
DebugLevel = true
)
FUNCTIONS
func Debug(format string, v ...interface{})
Debug will log with a DEBUG prefix if DebugLevel is set
func Error(format string, v ...interface{})
Error will log with an ERROR prefix
func FatalOnErr(err error, format string, v ...interface{})
FatalOnErr if error provided, will log out details of an error and exit
func Info(format string, v ...interface{})
Info formats logs with an INFO prefix
func Log(format string, v ...interface{})
Log formats logs directly to the main logger
func PanicOnErr(err error, format string, v ...interface{})
PanicOnErr if error provided, will log out details of an error and panic
func SetFlags(flag int)
SetFlags allows changing the logger flags using flags found in `log`
func WarnOnErr(err error, format string, v ...interface{})
WarnOnErr if error provided, will log a warning and the error details
func Warning(format string, v ...interface{})
Warning will log with a WARNING prefix
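
Not part of the generated docs above; a short usage sketch of the documented API (the file path is made up):

    package main

    import (
        "os"

        "github.com/iamthefij/dockron/slog"
    )

    func main() {
        slog.DebugLevel = true
        slog.Debug("only printed while DebugLevel is true")
        slog.Info("starting up")

        // Warn and continue if an optional file is missing
        _, err := os.Open("config.yml")
        slog.WarnOnErr(err, "could not open optional config")

        // FatalOnErr and PanicOnErr are no-ops when the error is nil
        slog.FatalOnErr(nil, "this does not exit")
    }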

slog/add-docs-to-readme.sh (new executable file)

@@ -0,0 +1,8 @@
#! /bin/bash
set -e
slogdir=$(dirname "$0")
readme="$slogdir/Readme.md"
awk '/## Documentation/ {print ; exit} {print}' "$readme" > "$readme.tmp" && go doc -all slog | sed "s/^/ /;s/[ \t]*$//" >> "$readme.tmp"
mv "$readme.tmp" "$readme"

slog/slog.go (new file)

@@ -0,0 +1,90 @@
// Package slog is a super simple logger that allows a few convenience methods
// for handling debug vs warning/error logs. It also adds a few conveniences for
// handling errors.
package slog
import (
"log"
"os"
)
var (
// DebugLevel indicates if we should log at the debug level
DebugLevel = true
// Default set of flags to use
defaultFlags = log.LstdFlags | log.Lmsgprefix
// Loggers for various levels. Prefixes are padded to align logged content
loggerInfo = log.New(os.Stderr, "INFO ", defaultFlags)
loggerWarning = log.New(os.Stderr, "WARNING ", defaultFlags)
loggerError = log.New(os.Stderr, "ERROR ", defaultFlags)
loggerDebug = log.New(os.Stderr, "DEBUG ", defaultFlags)
// Convenience for calling functions for all loggers in one method
allLoggers = []*log.Logger{
loggerInfo,
loggerWarning,
loggerError,
loggerDebug,
}
)
// SetFlags allows changing the logger flags using flags found in `log`
func SetFlags(flag int) {
for _, logger := range allLoggers {
logger.SetFlags(flag)
}
}
// Log formats logs directly to the main logger
func Log(format string, v ...interface{}) {
log.Printf(format, v...)
}
// Info formats logs with an INFO prefix
func Info(format string, v ...interface{}) {
loggerInfo.Printf(format, v...)
}
// Warning will log with a WARNING prefix
func Warning(format string, v ...interface{}) {
loggerWarning.Printf(format, v...)
}
// Error will log with an ERROR prefix
func Error(format string, v ...interface{}) {
loggerError.Printf(format, v...)
}
// Debug will log with a DEBUG prefix if DebugLevel is set
func Debug(format string, v ...interface{}) {
if !DebugLevel {
return
}
loggerDebug.Printf(format, v...)
}
// WarnOnErr if error provided, will log a warning and the error details
func WarnOnErr(err error, format string, v ...interface{}) {
if err != nil {
loggerWarning.Printf(format, v...)
loggerError.Print(err)
}
}
// FatalOnErr if error provided, will log out details of an error and exit
func FatalOnErr(err error, format string, v ...interface{}) {
if err != nil {
loggerError.Printf(format, v...)
loggerError.Fatal(err)
}
}
// PanicOnErr if error provided, will log out details of an error and panic
func PanicOnErr(err error, format string, v ...interface{}) {
if err != nil {
loggerError.Printf(format, v...)
loggerError.Panic(err)
}
}