refactor(update): clean up actions/update

- move common arguments to a shared struct
- remove unused fields
- fix outdated names
- improve logging/error handling
refactor-update
nils måsén 4 months ago
parent cb8e86d705
commit a6949dede9
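The core of this change is a parameter-object refactor: instead of threading `client` and `params` through every helper, they move onto an `updateSession` struct and the helpers become methods. A minimal, self-contained sketch of the pattern (the names mirror the diff, but the types are simplified stand-ins, not Watchtower's real ones):

```go
package main

import "fmt"

type Client struct{ host string }
type UpdateParams struct{ Cleanup bool }

// Before: every helper repeats the same two arguments.
func stopContainer(client Client, params UpdateParams, name string) {
	fmt.Println("stopping", name, "on", client.host)
}

// After: the shared state moves into a session struct...
type updateSession struct {
	client Client
	params UpdateParams
}

// ...and helpers become methods, shrinking every signature and call site.
func (us *updateSession) stopContainer(name string) {
	fmt.Println("stopping", name, "on", us.client.host)
}

func main() {
	us := updateSession{client: Client{host: "unix:///var/run/docker.sock"}, params: UpdateParams{Cleanup: true}}
	stopContainer(us.client, us.params, "app") // before
	us.stopContainer("app")                    // after
}
```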

@@ -87,7 +87,7 @@ func (client MockClient) ExecuteCommand(_ t.ContainerID, command string, _ time.
 }

 // IsContainerStale is true if not explicitly stated in TestData for the mock client
-func (client MockClient) IsContainerStale(cont t.Container, params t.UpdateParams) (bool, t.ImageID, error) {
+func (client MockClient) IsContainerStale(cont t.Container, _ t.UpdateParams) (bool, t.ImageID, error) {
 	stale, found := client.TestData.Staleness[cont.Name()]
 	if !found {
 		stale = true
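Renaming the unused `params` argument to `_` is Go's blank identifier at work: the mock still satisfies the interface's method signature, but there is no longer a named-yet-unused parameter for readers and linters to puzzle over. A tiny illustration with a hypothetical interface, not the real one:

```go
package main

import "fmt"

type Checker interface {
	IsStale(name string, opts map[string]string) bool
}

type mock struct{}

// The second parameter is required by the interface but unused by the mock,
// so it is declared as the blank identifier.
func (mock) IsStale(name string, _ map[string]string) bool {
	return name != "fresh"
}

func main() {
	var c Checker = mock{}
	fmt.Println(c.IsStale("app", nil)) // true
}
```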

@@ -2,6 +2,7 @@ package actions

 import (
 	"errors"
+	"fmt"
 	"github.com/containrrr/watchtower/internal/util"
 	"github.com/containrrr/watchtower/pkg/container"
@@ -9,37 +10,52 @@ import (
 	"github.com/containrrr/watchtower/pkg/session"
 	"github.com/containrrr/watchtower/pkg/sorter"
 	"github.com/containrrr/watchtower/pkg/types"
 	log "github.com/sirupsen/logrus"
 )

+type updateSession struct {
+	client   container.Client
+	params   types.UpdateParams
+	progress *session.Progress
+}
+
 // Update looks at the running Docker containers to see if any of the images
 // used to start those containers have been updated. If a change is detected in
 // any of the images, the associated containers are stopped and restarted with
 // the new image.
 func Update(client container.Client, params types.UpdateParams) (types.Report, error) {
-	log.Debug("Checking containers for updated images")
-	progress := &session.Progress{}
-	staleCount := 0
+	log.Debug("Starting new update session")
+	us := updateSession{client: client, params: params, progress: &session.Progress{}}

-	if params.LifecycleHooks {
-		lifecycle.ExecutePreChecks(client, params)
-	}
+	us.TryExecuteLifecycleCommands(types.PreCheck)

-	containers, err := client.ListContainers(params.Filter)
-	if err != nil {
+	if err := us.run(); err != nil {
 		return nil, err
 	}

-	staleCheckFailed := 0
+	us.TryExecuteLifecycleCommands(types.PostCheck)
+
+	return us.progress.Report(), nil
+}
+
+func (us *updateSession) run() (err error) {
+	containers, err := us.client.ListContainers(us.params.Filter)
+	if err != nil {
+		return err
+	}

 	for i, targetContainer := range containers {
-		stale, newestImage, err := client.IsContainerStale(targetContainer, params)
-		shouldUpdate := stale && !params.NoRestart && !targetContainer.IsMonitorOnly(params)
+		stale, newestImage, err := us.client.IsContainerStale(targetContainer, us.params)
+		shouldUpdate := stale && !us.params.NoRestart && !targetContainer.IsMonitorOnly(us.params)
 		if err == nil && shouldUpdate {
 			// Check to make sure we have all the necessary information for recreating the container
 			err = targetContainer.VerifyConfiguration()
-			// If the image information is incomplete and trace logging is enabled, log it for further diagnosis
 			if err != nil && log.IsLevelEnabled(log.TraceLevel) {
+				// If the image information is incomplete and trace logging is enabled, log it for further diagnosis
 				log.WithError(err).Trace("Cannot obtain enough information to recreate container")
 				imageInfo := targetContainer.ImageInfo()
 				log.Tracef("Image info: %#v", imageInfo)
 				log.Tracef("Container info: %#v", targetContainer.ContainerInfo())
@@ -51,62 +67,52 @@ func Update(client container.Client, params types.UpdateParams) (types.Report, error) {
 		if err != nil {
 			log.Infof("Unable to update container %q: %v. Proceeding to next.", targetContainer.Name(), err)
 			stale = false
-			staleCheckFailed++
-			progress.AddSkipped(targetContainer, err)
+			us.progress.AddSkipped(targetContainer, err)
+			containers[i].SetMarkedForUpdate(false)
 		} else {
-			progress.AddScanned(targetContainer, newestImage)
-		}
-		containers[i].SetStale(stale)
-		if stale {
-			staleCount++
+			us.progress.AddScanned(targetContainer, newestImage)
+			containers[i].SetMarkedForUpdate(shouldUpdate)
 		}
 	}

 	containers, err = sorter.SortByDependencies(containers)
 	if err != nil {
-		return nil, err
+		return fmt.Errorf("failed to sort containers for updating: %v", err)
 	}

 	UpdateImplicitRestart(containers)

 	var containersToUpdate []types.Container
 	for _, c := range containers {
-		if !c.IsMonitorOnly(params) {
+		if c.ToRestart() {
 			containersToUpdate = append(containersToUpdate, c)
-			progress.MarkForUpdate(c.ID())
+			us.progress.MarkForUpdate(c.ID())
 		}
 	}

-	if params.RollingRestart {
-		progress.UpdateFailed(performRollingRestart(containersToUpdate, client, params))
+	if us.params.RollingRestart {
+		us.performRollingRestart(containersToUpdate)
 	} else {
-		failedStop, stoppedImages := stopContainersInReversedOrder(containersToUpdate, client, params)
-		progress.UpdateFailed(failedStop)
-		failedStart := restartContainersInSortedOrder(containersToUpdate, client, params, stoppedImages)
-		progress.UpdateFailed(failedStart)
+		stoppedImages := us.stopContainersInReversedOrder(containersToUpdate)
+		us.restartContainersInSortedOrder(containersToUpdate, stoppedImages)
 	}

-	if params.LifecycleHooks {
-		lifecycle.ExecutePostChecks(client, params)
-	}
-	return progress.Report(), nil
+	return nil
 }

-func performRollingRestart(containers []types.Container, client container.Client, params types.UpdateParams) map[types.ContainerID]error {
+func (us *updateSession) performRollingRestart(containers []types.Container) {
 	cleanupImageIDs := make(map[types.ImageID]bool, len(containers))
 	failed := make(map[types.ContainerID]error, len(containers))

 	for i := len(containers) - 1; i >= 0; i-- {
 		if containers[i].ToRestart() {
-			err := stopStaleContainer(containers[i], client, params)
+			err := us.stopContainer(containers[i])
 			if err != nil {
 				failed[containers[i].ID()] = err
 			} else {
-				if err := restartStaleContainer(containers[i], client, params); err != nil {
+				if err := us.restartContainer(containers[i]); err != nil {
 					failed[containers[i].ID()] = err
-				} else if containers[i].IsStale() {
+				} else if containers[i].IsMarkedForUpdate() {
 					// Only add (previously) stale containers' images to cleanup
 					cleanupImageIDs[containers[i].ImageID()] = true
 				}
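The new `fmt` import exists for the error decoration above: `fmt.Errorf("failed to sort containers for updating: %v", err)` adds context before the error bubbles out of `run()`. Worth noting: with the `%v` verb the original error is flattened into text, whereas `%w` would keep it unwrappable for `errors.Is`/`errors.As`. A quick illustration of the difference, using a hypothetical sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// errCircular is a hypothetical sort failure, not a Watchtower error.
var errCircular = errors.New("circular reference detected")

func main() {
	flattened := fmt.Errorf("failed to sort containers for updating: %v", errCircular)
	wrapped := fmt.Errorf("failed to sort containers for updating: %w", errCircular)

	fmt.Println(errors.Is(flattened, errCircular)) // false: %v discards the chain
	fmt.Println(errors.Is(wrapped, errCircular))   // true: %w preserves it
}
```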
@@ -114,17 +120,17 @@ func performRollingRestart(containers []types.Container, client container.Client, params types.UpdateParams) map[types.ContainerID]error {
 		}
 	}

-	if params.Cleanup {
-		cleanupImages(client, cleanupImageIDs)
+	if us.params.Cleanup {
+		us.cleanupImages(cleanupImageIDs)
 	}
-	return failed
+	us.progress.UpdateFailed(failed)
 }

-func stopContainersInReversedOrder(containers []types.Container, client container.Client, params types.UpdateParams) (failed map[types.ContainerID]error, stopped map[types.ImageID]bool) {
-	failed = make(map[types.ContainerID]error, len(containers))
+func (us *updateSession) stopContainersInReversedOrder(containers []types.Container) (stopped map[types.ImageID]bool) {
+	failed := make(map[types.ContainerID]error, len(containers))
 	stopped = make(map[types.ImageID]bool, len(containers))
 	for i := len(containers) - 1; i >= 0; i-- {
-		if err := stopStaleContainer(containers[i], client, params); err != nil {
+		if err := us.stopContainer(containers[i]); err != nil {
 			failed[containers[i].ID()] = err
 		} else {
 			// NOTE: If a container is restarted due to a dependency this might be empty
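Stopping walks the dependency-sorted slice from the end so dependents go down before the containers they link to; the later restart pass replays the slice front to back so dependencies come up first. A reduced sketch of the two passes (own data, not the real sorter):

```go
package main

import "fmt"

func main() {
	// Assume a dependency sort placed dependencies before dependents,
	// e.g. the db first and the app that links to it last.
	sorted := []string{"db", "cache", "app"}

	// Stop in reverse order: dependents first.
	for i := len(sorted) - 1; i >= 0; i-- {
		fmt.Println("stop ", sorted[i])
	}

	// Restart in sorted order: dependencies first.
	for _, name := range sorted {
		fmt.Println("start", name)
	}
}
```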
@@ -132,47 +138,51 @@ func stopContainersInReversedOrder(containers []types.Container, client container.Client, params types.UpdateParams) (failed map[types.ContainerID]error, stopped map[types.ImageID]bool) {
 		}
 	}
-	return
+	us.progress.UpdateFailed(failed)
+	return stopped
 }

-func stopStaleContainer(container types.Container, client container.Client, params types.UpdateParams) error {
-	if container.IsWatchtower() {
-		log.Debugf("This is the watchtower container %s", container.Name())
+func (us *updateSession) stopContainer(c types.Container) error {
+	if c.IsWatchtower() {
+		log.Debugf("This is the watchtower container %s", c.Name())
 		return nil
 	}

-	if !container.ToRestart() {
+	if !c.ToRestart() {
 		return nil
 	}

 	// Perform an additional check here to prevent us from stopping a linked container we cannot restart
-	if container.IsLinkedToRestarting() {
-		if err := container.VerifyConfiguration(); err != nil {
+	if c.IsLinkedToRestarting() {
+		if err := c.VerifyConfiguration(); err != nil {
 			return err
 		}
 	}

-	if params.LifecycleHooks {
-		skipUpdate, err := lifecycle.ExecutePreUpdateCommand(client, container)
+	if us.params.LifecycleHooks {
+		err := lifecycle.ExecuteLifeCyclePhaseCommand(types.PreUpdate, us.client, c)
 		if err != nil {
+			if errors.Is(err, container.ErrorLifecycleSkip) {
+				log.Debug(err)
+				return err
+			}
 			log.Error(err)
 			log.Info("Skipping container as the pre-update command failed")
 			return err
 		}
-		if skipUpdate {
-			log.Debug("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
-			return errors.New("skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
-		}
 	}

-	if err := client.StopContainer(container, params.Timeout); err != nil {
+	if err := us.client.StopContainer(c, us.params.Timeout); err != nil {
 		log.Error(err)
 		return err
 	}
 	return nil
 }
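The `errors.Is(err, container.ErrorLifecycleSkip)` branch above replaces the old boolean `skipUpdate` return: the pre-update hook now signals the EX_TEMPFAIL case through a sentinel error, which keeps the "skip" signal distinct from genuine failures even across wrapping. A minimal sketch of the pattern (the sentinel and hook below are stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// errLifecycleSkip stands in for container.ErrorLifecycleSkip: a sentinel
// meaning "skip this container", as opposed to "the hook failed".
var errLifecycleSkip = errors.New("skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")

func runPreUpdateHook(exitCode int) error {
	switch exitCode {
	case 0:
		return nil
	case 75: // EX_TEMPFAIL
		return errLifecycleSkip
	default:
		return fmt.Errorf("pre-update command exited with code %d", exitCode)
	}
}

func main() {
	if err := runPreUpdateHook(75); err != nil {
		if errors.Is(err, errLifecycleSkip) {
			fmt.Println("debug:", err) // benign: container is skipped
			return
		}
		fmt.Println("error:", err) // genuine failure
	}
}
```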
@@ -181,58 +191,58 @@ func restartContainersInSortedOrder(containers []types.Container, client container.Client, params types.UpdateParams, stoppedImages map[types.ImageID]bool) map[types.ContainerID]error {
 			continue
 		}
 		if stoppedImages[c.SafeImageID()] {
-			if err := restartStaleContainer(c, client, params); err != nil {
+			if err := us.restartContainer(c); err != nil {
 				failed[c.ID()] = err
-			} else if c.IsStale() {
+			} else if c.IsMarkedForUpdate() {
 				// Only add (previously) stale containers' images to cleanup
 				cleanupImageIDs[c.ImageID()] = true
 			}
 		}
 	}

-	if params.Cleanup {
-		cleanupImages(client, cleanupImageIDs)
+	if us.params.Cleanup {
+		us.cleanupImages(cleanupImageIDs)
 	}
-	return failed
+	us.progress.UpdateFailed(failed)
 }

-func cleanupImages(client container.Client, imageIDs map[types.ImageID]bool) {
+func (us *updateSession) cleanupImages(imageIDs map[types.ImageID]bool) {
 	for imageID := range imageIDs {
 		if imageID == "" {
 			continue
 		}
-		if err := client.RemoveImageByID(imageID); err != nil {
+		if err := us.client.RemoveImageByID(imageID); err != nil {
 			log.Error(err)
 		}
 	}
 }

-func restartStaleContainer(container types.Container, client container.Client, params types.UpdateParams) error {
-	// Since we can't shutdown a watchtower container immediately, we need to
-	// start the new one while the old one is still running. This prevents us
-	// from re-using the same container name so we first rename the current
-	// instance so that the new one can adopt the old name.
+func (us *updateSession) restartContainer(container types.Container) error {
 	if container.IsWatchtower() {
-		if err := client.RenameContainer(container, util.RandName()); err != nil {
+		// Since we can't shut down a watchtower container immediately, we need to
+		// start the new one while the old one is still running. This prevents us
+		// from re-using the same container name, so we first rename the current
+		// instance so that the new one can adopt the old name.
+		if err := us.client.RenameContainer(container, util.RandName()); err != nil {
 			log.Error(err)
 			return nil
 		}
 	}

-	if !params.NoRestart {
-		if newContainerID, err := client.StartContainer(container); err != nil {
+	if !us.params.NoRestart {
+		if newContainerID, err := us.client.StartContainer(container); err != nil {
 			log.Error(err)
 			return err
-		} else if container.ToRestart() && params.LifecycleHooks {
-			lifecycle.ExecutePostUpdateCommand(client, newContainerID)
+		} else if container.ToRestart() && us.params.LifecycleHooks {
+			lifecycle.ExecutePostUpdateCommand(us.client, newContainerID)
 		}
 	}
 	return nil
 }

 // UpdateImplicitRestart iterates through the passed containers, setting the
-// `LinkedToRestarting` flag if any of it's linked containers are marked for restart
+// `linkedToRestarting` flag if any of its linked containers are marked for restart
 func UpdateImplicitRestart(containers []types.Container) {
 	for ci, c := range containers {
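`UpdateImplicitRestart` (unchanged here apart from its doc comment) propagates restart intent across container links, so a container that merely links to one marked for restart gets flagged too. A self-contained sketch of that propagation, using simplified types rather than Watchtower's:

```go
package main

import "fmt"

type ctr struct {
	name               string
	links              []string
	markedForUpdate    bool
	linkedToRestarting bool
}

func (c *ctr) toRestart() bool { return c.markedForUpdate || c.linkedToRestarting }

// updateImplicitRestart flags every container whose links include one that
// is already due to restart. (Single pass; the real code operates on a
// dependency-sorted slice.)
func updateImplicitRestart(containers []*ctr) {
	for _, c := range containers {
		for _, link := range c.links {
			for _, other := range containers {
				if other.name == link && other.toRestart() {
					c.linkedToRestarting = true
				}
			}
		}
	}
}

func main() {
	db := &ctr{name: "db", markedForUpdate: true}
	app := &ctr{name: "app", links: []string{"db"}}
	updateImplicitRestart([]*ctr{db, app})
	fmt.Println(app.toRestart()) // true: app restarts because db will
}
```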
@@ -265,3 +275,23 @@ func linkedContainerMarkedForRestart(links []string, containers []types.Container
 	}
 	return ""
 }
+
+// TryExecuteLifecycleCommands tries to run the corresponding lifecycle hook for all containers included by the current filter.
+func (us *updateSession) TryExecuteLifecycleCommands(phase types.LifecyclePhase) {
+	if !us.params.LifecycleHooks {
+		return
+	}
+
+	containers, err := us.client.ListContainers(us.params.Filter)
+	if err != nil {
+		log.WithError(err).Warn("Skipping lifecycle commands. Failed to list containers.")
+		return
+	}
+
+	for _, c := range containers {
+		err := lifecycle.ExecuteLifeCyclePhaseCommand(phase, us.client, c)
+		if err != nil {
+			log.WithField("container", c.Name()).Error(err)
+		}
+	}
+}
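`TryExecuteLifecycleCommands` folds the old `ExecutePreChecks`/`ExecutePostChecks` pair into one phase-parameterized method: list the filtered containers once, run that phase's hook on each, and log failures without aborting the session. The shape of that consolidation, in a reduced form with stand-in types:

```go
package main

import "fmt"

type phase int

const (
	preCheck phase = iota
	postCheck
)

func (p phase) String() string { return [...]string{"pre-check", "post-check"}[p] }

type session struct{ containers []string }

// tryExecuteLifecycleCommands runs one phase's hook per container; errors
// are reported but never abort the loop, matching the "try" in the name.
func (s *session) tryExecuteLifecycleCommands(p phase) {
	for _, c := range s.containers {
		if err := runHook(p, c); err != nil {
			fmt.Printf("container=%s phase=%s error=%v\n", c, p, err)
		}
	}
}

func runHook(p phase, container string) error {
	if container == "flaky" {
		return fmt.Errorf("%s hook exited non-zero", p)
	}
	return nil
}

func main() {
	s := &session{containers: []string{"app", "flaky"}}
	s.tryExecuteLifecycleCommands(preCheck)
	// ...the update work would happen here...
	s.tryExecuteLifecycleCommands(postCheck)
}
```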

@@ -396,7 +396,10 @@ func (client dockerClient) PullImage(ctx context.Context, container t.Container) error {
 		return err
 	}

-	defer response.Close()
+	defer func() {
+		_ = response.Close()
+	}()
+
 	// the pull request will be aborted prematurely unless the response is read
 	if _, err = io.ReadAll(response); err != nil {
 		log.Error(err)
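The `PullImage` change swaps a bare `defer response.Close()` for a deferred closure that explicitly discards `Close`'s return value, which makes the "we deliberately ignore this error" decision visible and keeps error-check linters quiet. The neighbouring comment matters too: the Docker pull streams progress data, and abandoning the reader aborts the pull. A generic version of the pattern:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func consume(r io.ReadCloser) error {
	// Explicitly discard Close's error instead of silently dropping it
	// with a bare `defer r.Close()`; errcheck-style linters flag the latter.
	defer func() {
		_ = r.Close()
	}()

	// Drain the stream to completion; for an image pull, stopping early
	// would abort the operation.
	body, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes\n", len(body))
	return nil
}

func main() {
	_ = consume(io.NopCloser(strings.NewReader("progress stream")))
}
```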

@@ -6,29 +6,6 @@ import (
 	log "github.com/sirupsen/logrus"
 )

 type ExecCommandFunc func(client container.Client, container types.Container)

-// ExecutePreCheckCommand tries to run the pre-check lifecycle hook for a single container.
-func ExecutePreCheckCommand(client container.Client, container types.Container) {
-	err := ExecuteLifeCyclePhaseCommand(types.PreCheck, client, container)
-	if err != nil {
-		log.WithField("container", container.Name()).Error(err)
-	}
-}
-
-// ExecutePostCheckCommand tries to run the post-check lifecycle hook for a single container.
-func ExecutePostCheckCommand(client container.Client, container types.Container) {
-	err := ExecuteLifeCyclePhaseCommand(types.PostCheck, client, container)
-	if err != nil {
-		log.WithField("container", container.Name()).Error(err)
-	}
-}
-
-// ExecutePreUpdateCommand tries to run the pre-update lifecycle hook for a single container.
-func ExecutePreUpdateCommand(client container.Client, container types.Container) error {
-	return ExecuteLifeCyclePhaseCommand(types.PreUpdate, client, container)
-}
-
 // ExecutePostUpdateCommand tries to run the post-update lifecycle hook for a single container.
 func ExecutePostUpdateCommand(client container.Client, newContainerID types.ContainerID) {
 	newContainer, err := client.GetContainer(newContainerID)
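The deleted wrappers each did nothing but pin a phase constant onto `ExecuteLifeCyclePhaseCommand`, so callers now pass the phase directly (as `stopContainer` and `TryExecuteLifecycleCommands` do above). Collapsing N near-identical wrappers into one parameterized entry point looks roughly like this (simplified signatures, not the real API):

```go
package main

import "fmt"

type lifecyclePhase int

const (
	preCheck lifecyclePhase = iota
	preUpdate
	postUpdate
	postCheck
)

// executeLifeCyclePhaseCommand is the single parameterized entry point.
func executeLifeCyclePhaseCommand(phase lifecyclePhase, container string) error {
	fmt.Printf("running phase %d hook for %s\n", phase, container)
	return nil
}

// Before the cleanup, each phase had a thin wrapper adding no behaviour:
//
//	func executePreUpdateCommand(container string) error {
//		return executeLifeCyclePhaseCommand(preUpdate, container)
//	}
//
// After it, callers simply write:
func main() {
	_ = executeLifeCyclePhaseCommand(preUpdate, "app")
}
```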
