refactor(update): clean up actions/update

- move common arguments to a shared struct
- remove unused fields
- fix outdated names
- improve logging/error handling
refactor-update
nils måsén 5 months ago
parent cb8e86d705
commit a6949dede9

@ -87,7 +87,7 @@ func (client MockClient) ExecuteCommand(_ t.ContainerID, command string, _ time.
} }
// IsContainerStale is true if not explicitly stated in TestData for the mock client // IsContainerStale is true if not explicitly stated in TestData for the mock client
func (client MockClient) IsContainerStale(cont t.Container, params t.UpdateParams) (bool, t.ImageID, error) { func (client MockClient) IsContainerStale(cont t.Container, _ t.UpdateParams) (bool, t.ImageID, error) {
stale, found := client.TestData.Staleness[cont.Name()] stale, found := client.TestData.Staleness[cont.Name()]
if !found { if !found {
stale = true stale = true

@ -2,6 +2,7 @@ package actions
import ( import (
"errors" "errors"
"fmt"
"github.com/containrrr/watchtower/internal/util" "github.com/containrrr/watchtower/internal/util"
"github.com/containrrr/watchtower/pkg/container" "github.com/containrrr/watchtower/pkg/container"
@ -9,37 +10,52 @@ import (
"github.com/containrrr/watchtower/pkg/session" "github.com/containrrr/watchtower/pkg/session"
"github.com/containrrr/watchtower/pkg/sorter" "github.com/containrrr/watchtower/pkg/sorter"
"github.com/containrrr/watchtower/pkg/types" "github.com/containrrr/watchtower/pkg/types"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// updateSession bundles the state shared by one update run: the Docker
// client, the caller-supplied update parameters, and the progress tracker
// that accumulates the final session report.
type updateSession struct {
	client   container.Client // Docker API client used for listing/stopping/starting containers
	params   types.UpdateParams
	progress *session.Progress // collects scanned/updated/skipped/failed results; read via progress.Report()
}
// Update looks at the running Docker containers to see if any of the images // Update looks at the running Docker containers to see if any of the images
// used to start those containers have been updated. If a change is detected in // used to start those containers have been updated. If a change is detected in
// any of the images, the associated containers are stopped and restarted with // any of the images, the associated containers are stopped and restarted with
// the new image. // the new image.
func Update(client container.Client, params types.UpdateParams) (types.Report, error) { func Update(client container.Client, params types.UpdateParams) (types.Report, error) {
log.Debug("Checking containers for updated images") log.Debug("Starting new update session")
progress := &session.Progress{} us := updateSession{client: client, params: params, progress: &session.Progress{}}
staleCount := 0
if params.LifecycleHooks { us.TryExecuteLifecycleCommands(types.PreCheck)
lifecycle.ExecutePreChecks(client, params)
}
containers, err := client.ListContainers(params.Filter) if err := us.run(); err != nil {
if err != nil {
return nil, err return nil, err
} }
staleCheckFailed := 0 us.TryExecuteLifecycleCommands(types.PostCheck)
return us.progress.Report(), nil
}
func (us *updateSession) run() (err error) {
containers, err := us.client.ListContainers(us.params.Filter)
if err != nil {
return err
}
for i, targetContainer := range containers { for i, targetContainer := range containers {
stale, newestImage, err := client.IsContainerStale(targetContainer, params) stale, newestImage, err := us.client.IsContainerStale(targetContainer, us.params)
shouldUpdate := stale && !params.NoRestart && !targetContainer.IsMonitorOnly(params) shouldUpdate := stale && !us.params.NoRestart && !targetContainer.IsMonitorOnly(us.params)
if err == nil && shouldUpdate { if err == nil && shouldUpdate {
// Check to make sure we have all the necessary information for recreating the container // Check to make sure we have all the necessary information for recreating the container
err = targetContainer.VerifyConfiguration() err = targetContainer.VerifyConfiguration()
// If the image information is incomplete and trace logging is enabled, log it for further diagnosis
if err != nil && log.IsLevelEnabled(log.TraceLevel) { if err != nil && log.IsLevelEnabled(log.TraceLevel) {
// If the image information is incomplete and trace logging is enabled, log it for further diagnosis
log.WithError(err).Trace("Cannot obtain enough information to recreate container")
imageInfo := targetContainer.ImageInfo() imageInfo := targetContainer.ImageInfo()
log.Tracef("Image info: %#v", imageInfo) log.Tracef("Image info: %#v", imageInfo)
log.Tracef("Container info: %#v", targetContainer.ContainerInfo()) log.Tracef("Container info: %#v", targetContainer.ContainerInfo())
@ -51,62 +67,52 @@ func Update(client container.Client, params types.UpdateParams) (types.Report, e
if err != nil { if err != nil {
log.Infof("Unable to update container %q: %v. Proceeding to next.", targetContainer.Name(), err) log.Infof("Unable to update container %q: %v. Proceeding to next.", targetContainer.Name(), err)
stale = false us.progress.AddSkipped(targetContainer, err)
staleCheckFailed++ containers[i].SetMarkedForUpdate(false)
progress.AddSkipped(targetContainer, err)
} else { } else {
progress.AddScanned(targetContainer, newestImage) us.progress.AddScanned(targetContainer, newestImage)
} containers[i].SetMarkedForUpdate(shouldUpdate)
containers[i].SetStale(stale)
if stale {
staleCount++
} }
} }
containers, err = sorter.SortByDependencies(containers) containers, err = sorter.SortByDependencies(containers)
if err != nil { if err != nil {
return nil, err return fmt.Errorf("failed to sort containers for updating: %v", err)
} }
UpdateImplicitRestart(containers) UpdateImplicitRestart(containers)
var containersToUpdate []types.Container var containersToUpdate []types.Container
for _, c := range containers { for _, c := range containers {
if !c.IsMonitorOnly(params) { if c.ToRestart() {
containersToUpdate = append(containersToUpdate, c) containersToUpdate = append(containersToUpdate, c)
progress.MarkForUpdate(c.ID()) us.progress.MarkForUpdate(c.ID())
} }
} }
if params.RollingRestart { if us.params.RollingRestart {
progress.UpdateFailed(performRollingRestart(containersToUpdate, client, params)) us.performRollingRestart(containersToUpdate)
} else { } else {
failedStop, stoppedImages := stopContainersInReversedOrder(containersToUpdate, client, params) stoppedImages := us.stopContainersInReversedOrder(containersToUpdate)
progress.UpdateFailed(failedStop) us.restartContainersInSortedOrder(containersToUpdate, stoppedImages)
failedStart := restartContainersInSortedOrder(containersToUpdate, client, params, stoppedImages)
progress.UpdateFailed(failedStart)
} }
if params.LifecycleHooks { return nil
lifecycle.ExecutePostChecks(client, params)
}
return progress.Report(), nil
} }
func performRollingRestart(containers []types.Container, client container.Client, params types.UpdateParams) map[types.ContainerID]error { func (us *updateSession) performRollingRestart(containers []types.Container) {
cleanupImageIDs := make(map[types.ImageID]bool, len(containers)) cleanupImageIDs := make(map[types.ImageID]bool, len(containers))
failed := make(map[types.ContainerID]error, len(containers)) failed := make(map[types.ContainerID]error, len(containers))
for i := len(containers) - 1; i >= 0; i-- { for i := len(containers) - 1; i >= 0; i-- {
if containers[i].ToRestart() { if containers[i].ToRestart() {
err := stopStaleContainer(containers[i], client, params) err := us.stopContainer(containers[i])
if err != nil { if err != nil {
failed[containers[i].ID()] = err failed[containers[i].ID()] = err
} else { } else {
if err := restartStaleContainer(containers[i], client, params); err != nil { if err := us.restartContainer(containers[i]); err != nil {
failed[containers[i].ID()] = err failed[containers[i].ID()] = err
} else if containers[i].IsStale() { } else if containers[i].IsMarkedForUpdate() {
// Only add (previously) stale containers' images to cleanup // Only add (previously) stale containers' images to cleanup
cleanupImageIDs[containers[i].ImageID()] = true cleanupImageIDs[containers[i].ImageID()] = true
} }
@ -114,17 +120,17 @@ func performRollingRestart(containers []types.Container, client container.Client
} }
} }
if params.Cleanup { if us.params.Cleanup {
cleanupImages(client, cleanupImageIDs) us.cleanupImages(cleanupImageIDs)
} }
return failed us.progress.UpdateFailed(failed)
} }
func stopContainersInReversedOrder(containers []types.Container, client container.Client, params types.UpdateParams) (failed map[types.ContainerID]error, stopped map[types.ImageID]bool) { func (us *updateSession) stopContainersInReversedOrder(containers []types.Container) (stopped map[types.ImageID]bool) {
failed = make(map[types.ContainerID]error, len(containers)) failed := make(map[types.ContainerID]error, len(containers))
stopped = make(map[types.ImageID]bool, len(containers)) stopped = make(map[types.ImageID]bool, len(containers))
for i := len(containers) - 1; i >= 0; i-- { for i := len(containers) - 1; i >= 0; i-- {
if err := stopStaleContainer(containers[i], client, params); err != nil { if err := us.stopContainer(containers[i]); err != nil {
failed[containers[i].ID()] = err failed[containers[i].ID()] = err
} else { } else {
// NOTE: If a container is restarted due to a dependency this might be empty // NOTE: If a container is restarted due to a dependency this might be empty
@ -132,47 +138,51 @@ func stopContainersInReversedOrder(containers []types.Container, client containe
} }
} }
return us.progress.UpdateFailed(failed)
return stopped
} }
func stopStaleContainer(container types.Container, client container.Client, params types.UpdateParams) error { func (us *updateSession) stopContainer(c types.Container) error {
if container.IsWatchtower() { if c.IsWatchtower() {
log.Debugf("This is the watchtower container %s", container.Name()) log.Debugf("This is the watchtower container %s", c.Name())
return nil return nil
} }
if !container.ToRestart() { if !c.ToRestart() {
return nil return nil
} }
// Perform an additional check here to prevent us from stopping a linked container we cannot restart // Perform an additional check here to prevent us from stopping a linked container we cannot restart
if container.IsLinkedToRestarting() { if c.IsLinkedToRestarting() {
if err := container.VerifyConfiguration(); err != nil { if err := c.VerifyConfiguration(); err != nil {
return err return err
} }
} }
if params.LifecycleHooks { if us.params.LifecycleHooks {
skipUpdate, err := lifecycle.ExecutePreUpdateCommand(client, container) err := lifecycle.ExecuteLifeCyclePhaseCommand(types.PreUpdate, us.client, c)
if err != nil { if err != nil {
if errors.Is(err, container.ErrorLifecycleSkip) {
log.Debug(err)
return err
}
log.Error(err) log.Error(err)
log.Info("Skipping container as the pre-update command failed") log.Info("Skipping container as the pre-update command failed")
return err return err
} }
if skipUpdate {
log.Debug("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
return errors.New("skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
}
} }
if err := client.StopContainer(container, params.Timeout); err != nil { if err := us.client.StopContainer(c, us.params.Timeout); err != nil {
log.Error(err) log.Error(err)
return err return err
} }
return nil return nil
} }
func restartContainersInSortedOrder(containers []types.Container, client container.Client, params types.UpdateParams, stoppedImages map[types.ImageID]bool) map[types.ContainerID]error { func (us *updateSession) restartContainersInSortedOrder(containers []types.Container, stoppedImages map[types.ImageID]bool) {
cleanupImageIDs := make(map[types.ImageID]bool, len(containers)) cleanupImageIDs := make(map[types.ImageID]bool, len(containers))
failed := make(map[types.ContainerID]error, len(containers)) failed := make(map[types.ContainerID]error, len(containers))
@ -181,58 +191,58 @@ func restartContainersInSortedOrder(containers []types.Container, client contain
continue continue
} }
if stoppedImages[c.SafeImageID()] { if stoppedImages[c.SafeImageID()] {
if err := restartStaleContainer(c, client, params); err != nil { if err := us.restartContainer(c); err != nil {
failed[c.ID()] = err failed[c.ID()] = err
} else if c.IsStale() { } else if c.IsMarkedForUpdate() {
// Only add (previously) stale containers' images to cleanup // Only add (previously) stale containers' images to cleanup
cleanupImageIDs[c.ImageID()] = true cleanupImageIDs[c.ImageID()] = true
} }
} }
} }
if params.Cleanup { if us.params.Cleanup {
cleanupImages(client, cleanupImageIDs) us.cleanupImages(cleanupImageIDs)
} }
return failed us.progress.UpdateFailed(failed)
} }
func cleanupImages(client container.Client, imageIDs map[types.ImageID]bool) { func (us *updateSession) cleanupImages(imageIDs map[types.ImageID]bool) {
for imageID := range imageIDs { for imageID := range imageIDs {
if imageID == "" { if imageID == "" {
continue continue
} }
if err := client.RemoveImageByID(imageID); err != nil { if err := us.client.RemoveImageByID(imageID); err != nil {
log.Error(err) log.Error(err)
} }
} }
} }
func restartStaleContainer(container types.Container, client container.Client, params types.UpdateParams) error { func (us *updateSession) restartContainer(container types.Container) error {
// Since we can't shutdown a watchtower container immediately, we need to
// start the new one while the old one is still running. This prevents us
// from re-using the same container name so we first rename the current
// instance so that the new one can adopt the old name.
if container.IsWatchtower() { if container.IsWatchtower() {
if err := client.RenameContainer(container, util.RandName()); err != nil { // Since we can't shut down a watchtower container immediately, we need to
// start the new one while the old one is still running. This prevents us
// from re-using the same container name, so we first rename the current
// instance so that the new one can adopt the old name.
if err := us.client.RenameContainer(container, util.RandName()); err != nil {
log.Error(err) log.Error(err)
return nil return nil
} }
} }
if !params.NoRestart { if !us.params.NoRestart {
if newContainerID, err := client.StartContainer(container); err != nil { if newContainerID, err := us.client.StartContainer(container); err != nil {
log.Error(err) log.Error(err)
return err return err
} else if container.ToRestart() && params.LifecycleHooks { } else if container.ToRestart() && us.params.LifecycleHooks {
lifecycle.ExecutePostUpdateCommand(client, newContainerID) lifecycle.ExecutePostUpdateCommand(us.client, newContainerID)
} }
} }
return nil return nil
} }
// UpdateImplicitRestart iterates through the passed containers, setting the // UpdateImplicitRestart iterates through the passed containers, setting the
// `LinkedToRestarting` flag if any of it's linked containers are marked for restart // `linkedToRestarting` flag if any of its linked containers are marked for restart
func UpdateImplicitRestart(containers []types.Container) { func UpdateImplicitRestart(containers []types.Container) {
for ci, c := range containers { for ci, c := range containers {
@ -265,3 +275,23 @@ func linkedContainerMarkedForRestart(links []string, containers []types.Containe
} }
return "" return ""
} }
// TryExecuteLifecycleCommands tries to run the corresponding lifecycle hook for all containers included by the current filter.
// It is a no-op unless lifecycle hooks are enabled in the session parameters. A failure to list
// containers is logged as a warning and aborts the phase; per-container hook errors are logged
// individually and do not stop the remaining containers from running their hooks.
func (us *updateSession) TryExecuteLifecycleCommands(phase types.LifecyclePhase) {
	if !us.params.LifecycleHooks {
		return
	}

	containers, listErr := us.client.ListContainers(us.params.Filter)
	if listErr != nil {
		log.WithError(listErr).Warn("Skipping lifecycle commands. Failed to list containers.")
		return
	}

	for _, cont := range containers {
		if hookErr := lifecycle.ExecuteLifeCyclePhaseCommand(phase, us.client, cont); hookErr != nil {
			log.WithField("container", cont.Name()).Error(hookErr)
		}
	}
}

@ -396,7 +396,10 @@ func (client dockerClient) PullImage(ctx context.Context, container t.Container)
return err return err
} }
defer response.Close() defer func() {
_ = response.Close()
}()
// the pull request will be aborted prematurely unless the response is read // the pull request will be aborted prematurely unless the response is read
if _, err = io.ReadAll(response); err != nil { if _, err = io.ReadAll(response); err != nil {
log.Error(err) log.Error(err)

@ -6,29 +6,6 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type ExecCommandFunc func(client container.Client, container types.Container)
// ExecutePreCheckCommand tries to run the pre-check lifecycle hook for a single container.
func ExecutePreCheckCommand(client container.Client, container types.Container) {
err := ExecuteLifeCyclePhaseCommand(types.PreCheck, client, container)
if err != nil {
log.WithField("container", container.Name()).Error(err)
}
}
// ExecutePostCheckCommand tries to run the post-check lifecycle hook for a single container.
func ExecutePostCheckCommand(client container.Client, container types.Container) {
err := ExecuteLifeCyclePhaseCommand(types.PostCheck, client, container)
if err != nil {
log.WithField("container", container.Name()).Error(err)
}
}
// ExecutePreUpdateCommand tries to run the pre-update lifecycle hook for a single container.
func ExecutePreUpdateCommand(client container.Client, container types.Container) error {
return ExecuteLifeCyclePhaseCommand(types.PreUpdate, client, container)
}
// ExecutePostUpdateCommand tries to run the post-update lifecycle hook for a single container. // ExecutePostUpdateCommand tries to run the post-update lifecycle hook for a single container.
func ExecutePostUpdateCommand(client container.Client, newContainerID types.ContainerID) { func ExecutePostUpdateCommand(client container.Client, newContainerID types.ContainerID) {
newContainer, err := client.GetContainer(newContainerID) newContainer, err := client.GetContainer(newContainerID)

Loading…
Cancel
Save