Add additional linting

IamTheFij 2021-04-30 11:24:14 -07:00
parent f651d55786
commit 15625a05fb
3 changed files with 43 additions and 19 deletions

.pre-commit-config.yaml

@@ -12,8 +12,7 @@ repos:
hooks:
- id: go-fmt
- id: go-imports
- # - id: gometalinter
- # - id: golangci-lint
+ - id: golangci-lint
- repo: https://github.com/IamTheFij/docker-pre-commit
rev: v2.0.0
hooks:

main.go

@@ -71,6 +71,7 @@ func (job ContainerStartJob) Run() {
if containerJSON.State.Running {
slog.Warningf("Container is already running. Skipping %s", job.name)
return
}
@@ -103,7 +104,6 @@ func (job ContainerStartJob) Run() {
containerJSON.State.ExitCode,
)
}
}
// Name returns the name of the job
@@ -141,6 +141,7 @@ func (job ContainerExecJob) Run() {
if !containerJSON.State.Running {
slog.Warningf("Container not running. Skipping %s", job.name)
return
}
@@ -169,11 +170,14 @@ func (job ContainerExecJob) Run() {
execID.ID,
)
slog.Debugf("Exec info: %+v", execInfo)
if err != nil {
// Nothing we can do if we got an error here, so let's go
slog.OnErrWarnf(err, "Could not get status for exec job %s", job.name)
return
}
time.Sleep(1 * time.Second)
}
slog.Debugf("Done execing %s. %+v", job.name, execInfo)
@@ -198,6 +202,7 @@ func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
// Add start job
if val, ok := container.Labels[schedLabel]; ok {
jobName := strings.Join(container.Names, "/")
jobs = append(jobs, ContainerStartJob{
client: client,
containerID: container.ID,
@@ -209,9 +214,12 @@ func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
// Add exec jobs
execJobs := map[string]map[string]string{}
for label, value := range container.Labels {
results := execLabelRegexp.FindStringSubmatch(label)
- if len(results) == 3 {
+ expectedLabelParts := 3
+ if len(results) == expectedLabelParts {
// We've got part of a new job
jobName, jobField := results[1], results[2]
if partJob, ok := execJobs[jobName]; ok {
@@ -225,15 +233,18 @@ func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
}
}
}
for jobName, jobConfig := range execJobs {
schedule, ok := jobConfig["schedule"]
if !ok {
continue
}
shellCommand, ok := jobConfig["command"]
if !ok {
continue
}
jobs = append(jobs, ContainerExecJob{
ContainerStartJob: ContainerStartJob{
client: client,
@@ -247,7 +258,7 @@ func QueryScheduledJobs(client ContainerClient) (jobs []ContainerCronJob) {
}
}
- return
+ return jobs
}
// ScheduleJobs accepts a Cron instance and a list of jobs to schedule.
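The `return` → `return jobs` change above is the kind of fix a golangci-lint check such as nakedret asks for. A minimal standalone sketch (not code from this repo) of why the explicit form reads better:

```go
package main

import "fmt"

// jobsBefore uses a naked return: with named results, a bare "return"
// implicitly returns whatever "jobs" holds, which the reader must infer.
func jobsBefore() (jobs []string) {
	jobs = append(jobs, "backup")
	return
}

// jobsAfter returns the value explicitly, as the linter prefers.
func jobsAfter() (jobs []string) {
	jobs = append(jobs, "backup")
	return jobs
}

func main() {
	fmt.Println(jobsBefore(), jobsAfter())
}
```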
@@ -266,6 +277,7 @@ func ScheduleJobs(c *cron.Cron, jobs []ContainerCronJob) {
// unschedule it later
slog.Debugf("Job %s is already scheduled. Skipping", job.Name())
delete(existingJobs, job.UniqueName())
continue
}
@@ -299,14 +311,14 @@
func main() {
// Get a Docker Client
client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
- if err != nil {
- panic(err)
- }
+ slog.OnErrPanicf(err, "Could not create Docker client")
// Read interval for polling Docker
var watchInterval time.Duration
- showVersion := flag.Bool("version", false, "Display the version of dockron and exit")
flag.DurationVar(&watchInterval, "watch", defaultWatchInterval, "Interval used to poll Docker for changes")
+ var showVersion = flag.Bool("version", false, "Display the version of dockron and exit")
flag.BoolVar(&slog.DebugLevel, "debug", false, "Show debug logs")
flag.Parse()
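The panic block replaced by `slog.OnErrPanicf` above follows a small "act on error" helper pattern. A sketch of that pattern using a local stand-in, since the slog package's import path isn't shown in this diff:

```go
package main

import (
	"errors"
	"log"
)

// onErrPanicf is a hypothetical stand-in for slog.OnErrPanicf: it does
// nothing when err is nil, and logs a formatted message then panics otherwise.
func onErrPanicf(err error, format string, args ...interface{}) {
	if err != nil {
		log.Printf(format, args...)
		panic(err)
	}
}

func main() {
	var err error // imagine this came from dockerClient.NewClientWithOpts
	onErrPanicf(err, "Could not create Docker client") // nil: no-op

	err = errors.New("cannot connect to the Docker daemon")
	onErrPanicf(err, "Could not create Docker client") // non-nil: panics
}
```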

main_test.go

@@ -1,6 +1,7 @@
package main
import (
+ "errors"
"fmt"
"log"
"reflect"
@@ -29,6 +30,8 @@ var (
},
},
}
+ errGeneric = errors.New("error")
)
// FakeCall represents a faked method call
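The new `errGeneric` sentinel replaces the `fmt.Errorf(...)` literals further down; golangci-lint checks such as goerr113 discourage ad-hoc dynamic errors. A minimal sketch (independent of this repo) of why a shared sentinel is useful:

```go
package main

import (
	"errors"
	"fmt"
)

// errGeneric is a package-level sentinel, created once with errors.New.
var errGeneric = errors.New("error")

// failingCall wraps the sentinel so callers can still match it.
func failingCall() error {
	return fmt.Errorf("inspect failed: %w", errGeneric)
}

func main() {
	err := failingCall()
	// errors.Is follows the wrap chain, so the sentinel's identity survives.
	fmt.Println(errors.Is(err, errGeneric)) // true
}
```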
@@ -75,6 +78,7 @@ func (fakeClient *FakeDockerClient) ContainerStart(context context.Context, cont
if results[0] != nil {
e = results[0].(error)
}
return
}
@@ -83,9 +87,11 @@ func (fakeClient *FakeDockerClient) ContainerList(context context.Context, optio
if results[0] != nil {
c = results[0].([]dockerTypes.Container)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
@@ -94,9 +100,11 @@ func (fakeClient *FakeDockerClient) ContainerExecCreate(ctx context.Context, con
if results[0] != nil {
r = results[0].(dockerTypes.IDResponse)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
@@ -105,6 +113,7 @@ func (fakeClient *FakeDockerClient) ContainerExecStart(ctx context.Context, exec
if results[0] != nil {
e = results[0].(error)
}
return
}
@@ -113,9 +122,11 @@ func (fakeClient *FakeDockerClient) ContainerExecInspect(ctx context.Context, ex
if results[0] != nil {
r = results[0].(dockerTypes.ContainerExecInspect)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
@@ -124,9 +135,11 @@ func (fakeClient *FakeDockerClient) ContainerInspect(ctx context.Context, contai
if results[0] != nil {
r = results[0].(dockerTypes.ContainerJSON)
}
if results[1] != nil {
e = results[1].(error)
}
return
}
@@ -653,8 +666,8 @@ func TestDoLoop(t *testing.T) {
// to a subpackage that offers a single function for interfacing with the
// Docker client to start or exec a container so that Dockron needn't care.
func TestRunExecJobs(t *testing.T) {
var jobContext context.Context
jobContainerID := "container_id"
jobCommand := "true"
@@ -669,7 +682,7 @@ func TestRunExecJobs(t *testing.T) {
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
- FakeResult{nil, fmt.Errorf("error")},
+ FakeResult{nil, errGeneric},
},
},
},
@@ -703,7 +716,7 @@ func TestRunExecJobs(t *testing.T) {
FakeResult{runningContainerInfo, nil},
},
"ContainerExecCreate": []FakeResult{
- FakeResult{nil, fmt.Errorf("fail")},
+ FakeResult{nil, errGeneric},
},
},
},
@@ -734,7 +747,7 @@ func TestRunExecJobs(t *testing.T) {
FakeResult{dockerTypes.IDResponse{ID: "id"}, nil},
},
"ContainerExecStart": []FakeResult{
- FakeResult{fmt.Errorf("fail")},
+ FakeResult{errGeneric},
},
},
},
@@ -771,7 +784,7 @@ func TestRunExecJobs(t *testing.T) {
FakeResult{nil},
},
"ContainerExecInspect": []FakeResult{
- FakeResult{nil, fmt.Errorf("fail")},
+ FakeResult{nil, errGeneric},
},
},
},
@@ -877,6 +890,7 @@ func TestRunExecJobs(t *testing.T) {
// Docker client to start or exec a container so that Dockron needn't care.
func TestRunStartJobs(t *testing.T) {
var jobContext context.Context
jobContainerID := "container_id"
cases := []struct {
@@ -890,7 +904,7 @@ func TestRunStartJobs(t *testing.T) {
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
- FakeResult{nil, fmt.Errorf("error")},
+ FakeResult{nil, errGeneric},
},
},
},
@@ -923,7 +937,7 @@ func TestRunStartJobs(t *testing.T) {
"ContainerInspect": []FakeResult{
FakeResult{stoppedContainerInfo, nil},
},
- "ContainerStart": []FakeResult{FakeResult{fmt.Errorf("fail")}},
+ "ContainerStart": []FakeResult{FakeResult{errGeneric}},
},
},
expectedCalls: map[string][]FakeCall{
@@ -936,7 +950,7 @@ func TestRunStartJobs(t *testing.T) {
},
},
{
- name: "Succesfully start a container",
+ name: "Successfully start a container",
client: &FakeDockerClient{
FakeResults: map[string][]FakeResult{
"ContainerInspect": []FakeResult{
@@ -974,14 +988,13 @@ func TestRunStartJobs(t *testing.T) {
defer func() {
// Recover from panics, if there were any
- recover()
+ _ = recover()
c.client.AssertFakeCalls(t, c.expectedCalls, "Failed")
}()
job.Run()
if c.expectPanic {
t.Errorf("Expected panic but got none")
}
})
}
}
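The last hunk's `recover()` → `_ = recover()` change makes the discarded return value explicit, which some golangci-lint checks require. A compact sketch (hypothetical names, not the repo's test harness) of the deferred-recover pattern these tests rely on:

```go
package main

import "fmt"

// runJob stands in for job.Run(), which may panic in the error cases.
func runJob(shouldPanic bool) {
	if shouldPanic {
		panic("job failed")
	}
}

func main() {
	for _, expectPanic := range []bool{true, false} {
		func() {
			defer func() {
				// Swallow an expected panic so later assertions still run;
				// assigning to _ marks the discard as deliberate.
				_ = recover()
				fmt.Println("assertions would run here")
			}()
			runJob(expectPanic)
		}()
	}
}
```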