Move logger module to a subpackage

This will allow me to use my logging helpers from another project.
IamTheFij 2020-08-16 15:03:09 -07:00
parent 3ee32b632a
commit 4cb0b027cd
3 changed files with 32 additions and 20 deletions
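
With the logger split into its own subpackage, any other Go module can pull these helpers in by import path. A minimal sketch of that reuse (the import path and helper names are taken from the diff below; the consuming program is hypothetical):

```go
// A separate, hypothetical project importing dockron's logging helpers.
package main

import "github.com/iamthefij/dockron/slog"

func main() {
	// The same package-level toggle that main.go wires to its -debug flag below.
	slog.DebugLevel = true
	slog.LogDebug("reusing dockron's slog from another module")
}
```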

main.go (39 changes)

@@ -11,6 +11,7 @@ import (
 	dockerTypes "github.com/docker/docker/api/types"
 	dockerClient "github.com/docker/docker/client"
+	"github.com/iamthefij/dockron/slog"
 	"github.com/robfig/cron/v3"
 	"golang.org/x/net/context"
 )
@@ -67,10 +68,10 @@ func (job ContainerStartJob) Run() {
 		job.context,
 		job.containerID,
 	)
-	PanicErr(err, "Could not get container details for job %s", job.name)
+	slog.PanicErr(err, "Could not get container details for job %s", job.name)

 	if containerJSON.State.Running {
-		LogWarning("Container is already running. Skipping %s", job.name)
+		slog.LogWarning("Container is already running. Skipping %s", job.name)
 		return
 	}
@@ -80,24 +81,24 @@ func (job ContainerStartJob) Run() {
 		job.containerID,
 		dockerTypes.ContainerStartOptions{},
 	)
-	PanicErr(err, "Could not start container for jobb %s", job.name)
+	slog.PanicErr(err, "Could not start container for job %s", job.name)

 	// Check results of job
 	for check := true; check; check = containerJSON.State.Running {
-		LogDebug("Still running %s", job.name)
+		slog.LogDebug("Still running %s", job.name)
 		containerJSON, err = job.client.ContainerInspect(
 			job.context,
 			job.containerID,
 		)
-		PanicErr(err, "Could not get container details for job %s", job.name)
+		slog.PanicErr(err, "Could not get container details for job %s", job.name)
 		time.Sleep(1 * time.Second)
 	}

-	LogDebug("Done execing %s. %+v", job.name, containerJSON.State)
+	slog.LogDebug("Done execing %s. %+v", job.name, containerJSON.State)

 	// Log exit code if failed
 	if containerJSON.State.ExitCode != 0 {
-		LogError(
+		slog.LogError(
 			"Exec job %s exited with code %d",
 			job.name,
 			containerJSON.State.ExitCode,
@@ -137,10 +138,10 @@ func (job ContainerExecJob) Run() {
 		job.context,
 		job.containerID,
 	)
-	PanicErr(err, "Could not get container details for job %s", job.name)
+	slog.PanicErr(err, "Could not get container details for job %s", job.name)

 	if !containerJSON.State.Running {
-		LogWarning("Container not running. Skipping %s", job.name)
+		slog.LogWarning("Container not running. Skipping %s", job.name)
 		return
 	}
@@ -151,19 +152,19 @@ func (job ContainerExecJob) Run() {
 			Cmd: []string{"sh", "-c", strings.TrimSpace(job.shellCommand)},
 		},
 	)
-	PanicErr(err, "Could not create container exec job for %s", job.name)
+	slog.PanicErr(err, "Could not create container exec job for %s", job.name)

 	err = job.client.ContainerExecStart(
 		job.context,
 		execID.ID,
 		dockerTypes.ExecStartCheck{},
 	)
-	PanicErr(err, "Could not start container exec job for %s", job.name)
+	slog.PanicErr(err, "Could not start container exec job for %s", job.name)

 	// Wait for job results
 	execInfo := dockerTypes.ContainerExecInspect{Running: true}
 	for execInfo.Running {
-		LogDebug("Still execing %s", job.name)
+		slog.LogDebug("Still execing %s", job.name)
 		execInfo, err = job.client.ContainerExecInspect(
 			job.context,
 			execID.ID,
@@ -173,23 +174,23 @@ func (job ContainerExecJob) Run() {
 		}
 		time.Sleep(1 * time.Second)
 	}

-	LogDebug("Done execing %s. %+v", job.name, execInfo)
+	slog.LogDebug("Done execing %s. %+v", job.name, execInfo)

 	// Log exit code if failed
 	if execInfo.ExitCode != 0 {
-		LogError("Exec job %s exited with code %d", job.name, execInfo.ExitCode)
+		slog.LogError("Exec job %s exited with code %d", job.name, execInfo.ExitCode)
 	}
 }

 // QueryScheduledJobs queries Docker for all containers with a schedule and
 // returns a list of ContainerCronJob records to be scheduled
 func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
-	LogDebug("Scanning containers for new schedules...")
+	slog.LogDebug("Scanning containers for new schedules...")
 	containers, err := client.ContainerList(
 		context.Background(),
 		dockerTypes.ContainerListOptions{All: true},
 	)
-	PanicErr(err, "Failure querying docker containers")
+	slog.PanicErr(err, "Failure querying docker containers")

 	for _, container := range containers {
 		// Add start job
@@ -261,7 +262,7 @@ func ScheduleJobs(c *cron.Cron, jobs []ContainerCronJob) {
 		if _, ok := existingJobs[job.UniqueName()]; ok {
 			// Job already exists, remove it from existing jobs so we don't
 			// unschedule it later
-			LogDebug("Job %s is already scheduled. Skipping", job.Name())
+			slog.LogDebug("Job %s is already scheduled. Skipping", job.Name())
 			delete(existingJobs, job.UniqueName())
 			continue
 		}
@@ -277,7 +278,7 @@ func ScheduleJobs(c *cron.Cron, jobs []ContainerCronJob) {
 		)
 	} else {
 		// TODO: Track something for a healthcheck here
-		LogError(
+		slog.LogError(
 			"Could not schedule %s (%s) with schedule '%s'. %v\n",
 			job.Name(),
 			job.UniqueName(),
@@ -304,7 +305,7 @@ func main() {
 	var watchInterval time.Duration
 	flag.DurationVar(&watchInterval, "watch", defaultWatchInterval, "Interval used to poll Docker for changes")
 	var showVersion = flag.Bool("version", false, "Display the version of dockron and exit")
-	flag.BoolVar(&DebugLevel, "debug", false, "Show debug logs")
+	flag.BoolVar(&slog.DebugLevel, "debug", false, "Show debug logs")
 	flag.Parse()

 	// Print version if asked

slog/Readme.md (new file, 11 additions)

@@ -0,0 +1,11 @@
+# slog
+
+A super simple Go logger
+
+I know there are many Go loggers out there that offer various logging features such as file rotation, granular verbosity settings, colored and JSON output, etc.
+
+_Slog is not one of them._
+
+Slog lets you hide or show debug logs and provides a simpler way to log messages with Warning and Error prefixes for consistency.
+
+Also provided are a few simple methods for handling returned `error` values: logging them and optionally panicking or exiting fatally.
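
The Readme's whole surface shows up in this commit's call sites: a package-level `DebugLevel` toggle, prefixed `LogDebug`/`LogWarning`/`LogError` helpers, and `PanicErr` for returned errors. A minimal usage sketch, under the assumption (not shown in the diff) that `PanicErr` does nothing when the error is nil:

```go
package main

import "github.com/iamthefij/dockron/slog"

func main() {
	// Debug messages are hidden unless the package-level toggle is set.
	slog.DebugLevel = true
	slog.LogDebug("visible because DebugLevel is true")

	// Plain messages with consistent Warning/Error prefixes.
	slog.LogWarning("disk is %d%% full", 90)
	slog.LogError("upload failed for %s", "backup.tar")

	// Error helper: assumed to no-op on a nil error and to log the
	// formatted message and panic on a non-nil one.
	var err error
	slog.PanicErr(err, "would panic here if err were non-nil: %v", err)
}
```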


@@ -1,4 +1,4 @@
-package main
+package slog

 import (
 	"log"