fix depends-on behavior and simplify some of its logic (#908)

* fix depends-on behavior and simplify some of its logic

* fix comments
Simon Aronsson (committed by GitHub)
parent 4142f7966a
commit 3de202a965

@@ -154,9 +154,18 @@ func Run(c *cobra.Command, names []string) {
 	runOnce, _ := c.PersistentFlags().GetBool("run-once")
 	enableUpdateAPI, _ := c.PersistentFlags().GetBool("http-api-update")
 	enableMetricsAPI, _ := c.PersistentFlags().GetBool("http-api-metrics")
 
 	apiToken, _ := c.PersistentFlags().GetString("http-api-token")
 
+	if rollingRestart && monitorOnly {
+		log.Fatal("Rolling restarts is not compatible with the global monitor only flag")
+	}
+
+	awaitDockerClient()
+	if err := actions.CheckForSanity(client, filter, rollingRestart); err != nil {
+		logNotifyExit(err)
+	}
+
 	if runOnce {
 		writeStartupMessage(c, time.Time{}, filterDesc)
 		runUpdatesWithNotifications(filter)
@@ -166,7 +175,7 @@ func Run(c *cobra.Command, names []string) {
 	}
 
 	if err := actions.CheckForMultipleWatchtowerInstances(client, cleanup, scope); err != nil {
-		log.Fatal(err)
+		logNotifyExit(err)
 	}
 
 	httpAPI := api.New(apiToken)
@@ -192,6 +201,17 @@ func Run(c *cobra.Command, names []string) {
 		os.Exit(1)
 	}
 }
 
+func logNotifyExit(err error) {
+	log.Error(err)
+	notifier.Close()
+	os.Exit(1)
+}
+
+func awaitDockerClient() {
+	log.Debug("Sleeping for a second to ensure the docker api client has been properly initialized.")
+	time.Sleep(1 * time.Second)
+}
+
 func formatDuration(d time.Duration) string {
 	sb := strings.Builder{}
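
Note: os.Exit terminates the process without running deferred functions, which is why logNotifyExit closes the notifier explicitly before exiting. A minimal standalone sketch of the same pattern (the notifier type here is a stand-in, not watchtower's actual notifier):

package main

import (
	"errors"
	"fmt"
	"os"
)

// notifier is a stand-in for a batching notification sender that must
// flush its queue before the process terminates.
type notifier struct{ queued []string }

func (n *notifier) Close() { fmt.Println("flushing:", n.queued) }

func logNotifyExit(n *notifier, err error) {
	fmt.Println("ERROR:", err)
	n.Close() // explicit flush: os.Exit below skips deferred calls
	os.Exit(1)
}

func main() {
	n := &notifier{queued: []string{"startup failed"}}
	defer n.Close() // never runs; os.Exit bypasses defers
	logNotifyExit(n, errors.New("sanity check failed"))
}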

@@ -2,28 +2,47 @@ package actions
 
 import (
 	"fmt"
+	"github.com/containrrr/watchtower/pkg/types"
 	"sort"
 	"time"
 
 	"github.com/containrrr/watchtower/pkg/filters"
 	"github.com/containrrr/watchtower/pkg/sorter"
-	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	"github.com/containrrr/watchtower/pkg/container"
 )
 
+// CheckForSanity makes sure everything is sane before starting
+func CheckForSanity(client container.Client, filter types.Filter, rollingRestarts bool) error {
+	log.Debug("Making sure everything is sane before starting")
+
+	if rollingRestarts {
+		containers, err := client.ListContainers(filter)
+		if err != nil {
+			return err
+		}
+		for _, c := range containers {
+			if len(c.Links()) > 0 {
+				return fmt.Errorf(
+					"%q is depending on at least one other container. This is not compatible with rolling restarts",
+					c.Name(),
+				)
+			}
+		}
+	}
+
+	return nil
+}
+
 // CheckForMultipleWatchtowerInstances will ensure that there are not multiple instances of the
 // watchtower running simultaneously. If multiple watchtower containers are detected, this function
 // will stop and remove all but the most recently started container. This behaviour can be bypassed
 // if a scope UID is defined.
 func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool, scope string) error {
-	awaitDockerClient()
 	containers, err := client.ListContainers(filters.FilterByScope(scope, filters.WatchtowerContainersFilter))
 
 	if err != nil {
-		log.Fatal(err)
+		return err
 	}
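
The added CheckForSanity is a pre-flight guard: with rolling restarts enabled, any container that declares a link is rejected before the update loop ever starts. A self-contained sketch of the same logic using simplified stand-in types (the real code goes through the container.Client interface):

package main

import "fmt"

type fakeContainer struct {
	name  string
	links []string
}

func (c fakeContainer) Name() string    { return c.name }
func (c fakeContainer) Links() []string { return c.links }

// checkForSanity mirrors the added guard: rolling restarts are refused
// as soon as any container depends on another one.
func checkForSanity(containers []fakeContainer, rollingRestarts bool) error {
	if rollingRestarts {
		for _, c := range containers {
			if len(c.Links()) > 0 {
				return fmt.Errorf(
					"%q is depending on at least one other container. This is not compatible with rolling restarts",
					c.Name(),
				)
			}
		}
	}
	return nil
}

func main() {
	containers := []fakeContainer{
		{name: "parent"},
		{name: "depending", links: []string{"parent"}},
	}
	fmt.Println(checkForSanity(containers, true))  // error mentioning "depending"
	fmt.Println(checkForSanity(containers, false)) // <nil>
}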
@@ -45,14 +64,14 @@ func cleanupExcessWatchtowers(containers []container.Container, client container
 	for _, c := range allContainersExceptLast {
 		if err := client.StopContainer(c, 10*time.Minute); err != nil {
 			// logging the original here as we're just returning a count
-			logrus.WithError(err).Error("Could not stop a previous watchtower instance.")
+			log.WithError(err).Error("Could not stop a previous watchtower instance.")
 			stopErrors++
 			continue
 		}
 
 		if cleanup {
 			if err := client.RemoveImageByID(c.ImageID()); err != nil {
-				logrus.WithError(err).Warning("Could not cleanup watchtower images, possibly because of other watchtowers instances in other scopes.")
+				log.WithError(err).Warning("Could not cleanup watchtower images, possibly because of other watchtowers instances in other scopes.")
 			}
 		}
 	}
@@ -63,8 +82,3 @@ func cleanupExcessWatchtowers(containers []container.Container, client container
 
 	return nil
 }
-
-func awaitDockerClient() {
-	log.Debug("Sleeping for a second to ensure the docker api client has been properly initialized.")
-	time.Sleep(1 * time.Second)
-}

@@ -50,6 +50,7 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 	}
 
 	containers, err = sorter.SortByDependencies(containers)
+	metric.Scanned = len(containers)
 
 	if err != nil {
 		return nil, err
@@ -57,11 +58,11 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 	checkDependencies(containers)
 
-	containersToUpdate := []container.Container{}
+	var containersToUpdate []container.Container
 	if !params.MonitorOnly {
-		for i := len(containers) - 1; i >= 0; i-- {
-			if !containers[i].IsMonitorOnly() {
-				containersToUpdate = append(containersToUpdate, containers[i])
+		for _, c := range containers {
+			if !c.IsMonitorOnly() {
+				containersToUpdate = append(containersToUpdate, c)
 			}
 		}
 	}
@@ -86,7 +87,7 @@ func performRollingRestart(containers []container.Container, client container.Cl
 	failed := 0
 
 	for i := len(containers) - 1; i >= 0; i-- {
-		if containers[i].Stale {
+		if containers[i].ToRestart() {
 			if err := stopStaleContainer(containers[i], client, params); err != nil {
 				failed++
 			}
@@ -119,7 +120,7 @@ func stopStaleContainer(container container.Container, client container.Client,
 		return nil
 	}
 
-	if !container.Stale {
+	if !container.ToRestart() {
 		return nil
 	}
 
 	if params.LifecycleHooks {
@@ -143,7 +144,7 @@ func restartContainersInSortedOrder(containers []container.Container, client con
 	failed := 0
 
 	for _, c := range containers {
-		if !c.Stale {
+		if !c.ToRestart() {
 			continue
 		}
 		if err := restartStaleContainer(c, client, params); err != nil {
@@ -183,7 +184,7 @@ func restartStaleContainer(container container.Container, client container.Clien
 		if newContainerID, err := client.StartContainer(container); err != nil {
 			log.Error(err)
 			return err
-		} else if container.Stale && params.LifecycleHooks {
+		} else if container.ToRestart() && params.LifecycleHooks {
 			lifecycle.ExecutePostUpdateCommand(client, newContainerID)
 		}
 	}
@@ -192,16 +193,19 @@ func restartStaleContainer(container container.Container, client container.Clien
 func checkDependencies(containers []container.Container) {
 
-	for i, parent := range containers {
-		if parent.ToRestart() {
+	for _, c := range containers {
+		if c.ToRestart() {
 			continue
 		}
 
 	LinkLoop:
-		for _, linkName := range parent.Links() {
-			for _, child := range containers {
-				if child.Name() == linkName && child.ToRestart() {
-					containers[i].Linked = true
+		for _, linkName := range c.Links() {
+			for _, candidate := range containers {
+				if candidate.Name() != linkName {
+					continue
+				}
+				if candidate.ToRestart() {
+					c.LinkedToRestarting = true
 					break LinkLoop
 				}
 			}
 		}
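
A self-contained sketch of the reworked marking pass. Pointers are used here so the flag assignment survives the range loop; the stand-in ctr type is illustrative, not watchtower's container.Container:

package main

import "fmt"

type ctr struct {
	Name               string
	Links              []string
	Stale              bool
	LinkedToRestarting bool
}

func (c *ctr) ToRestart() bool { return c.Stale || c.LinkedToRestarting }

// checkDependencies marks every container that links to a container
// already due for a restart, mirroring the restructured loop above.
func checkDependencies(containers []*ctr) {
	for _, c := range containers {
		if c.ToRestart() {
			continue
		}
	LinkLoop:
		for _, linkName := range c.Links {
			for _, candidate := range containers {
				if candidate.Name != linkName {
					continue
				}
				if candidate.ToRestart() {
					c.LinkedToRestarting = true
					break LinkLoop
				}
			}
		}
	}
}

func main() {
	parent := &ctr{Name: "parent", Stale: true}
	child := &ctr{Name: "depending", Links: []string{"parent"}}
	checkDependencies([]*ctr{parent, child})
	fmt.Println(child.ToRestart()) // true: restarted along with its stale parent
}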

@@ -22,8 +22,8 @@ func NewContainer(containerInfo *types.ContainerJSON, imageInfo *types.ImageInsp
 // Container represents a running Docker container.
 type Container struct {
-	Linked bool
-	Stale  bool
+	LinkedToRestarting bool
+	Stale              bool
 
 	containerInfo *types.ContainerJSON
 	imageInfo     *types.ImageInspect
@@ -142,7 +142,7 @@ func (c Container) Links() []string {
 // ToRestart return whether the container should be restarted, either because
 // is stale or linked to another stale container.
 func (c Container) ToRestart() bool {
-	return c.Stale || c.Linked
+	return c.Stale || c.LinkedToRestarting
 }
 
 // IsWatchtower returns a boolean flag indicating whether or not the current
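
This rename pins down the behavioral core of the fix: the stop/restart paths above now key on ToRestart() rather than Stale, so a container that is fresh itself but linked to a stale one is also recreated. A tiny truth-table demonstration under the same stand-in assumptions:

package main

import "fmt"

type state struct{ Stale, LinkedToRestarting bool }

func (s state) ToRestart() bool { return s.Stale || s.LinkedToRestarting }

func main() {
	for _, s := range []state{
		{Stale: true},              // image was updated
		{LinkedToRestarting: true}, // depends on an updated container
		{},                         // untouched
	} {
		// The old code gated stops and restarts on Stale alone, which
		// skipped the second case; ToRestart() covers both reasons.
		fmt.Printf("%+v stale-only=%v toRestart=%v\n", s, s.Stale, s.ToRestart())
	}
}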

@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# Simulates a container that will always be updated, checking whether it shuts down its dependencies correctly.
+
+docker rm -f parent || true
+docker rm -f depending || true
+
+CHANGE=redis:latest
+KEEP=tutum/hello-world
+
+docker tag tutum/hello-world:latest redis:latest
+
+docker run -d --name parent $CHANGE
+docker run -d --name depending --link parent $KEEP
+
+go run . --run-once --debug $@
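
For context (inferred from the script itself, not stated in the commit): tagging tutum/hello-world:latest as redis:latest makes the local redis:latest differ from the registry copy, so watchtower treats parent as stale on every run. A correct run should therefore stop depending before parent and start both again afterwards, even though depending's own image ($KEEP) never changes, which is exactly the case the change from Stale to ToRestart() above fixes.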