Pre-update lifecycle hook (#793)

* Make watchtower skip update if pre-update lifecycle hook exits with a non-zero exit code #649

* Prevent starting new container if old one is not stopped because of lifecycle hook.

* Add null check for c.containerInfo.State in IsRunning

* Fixed an issue where the container would not start

* Added test for preupdate

* EX_TEMPFAIL -> ExTempFail

* Added missing function output names

* Skip preupdate when container is restarting.
yrien30 committed by GitHub (commit 145fe6dbcb, parent dc12a1ac7f)

@@ -4,6 +4,9 @@
These are shell commands executed with `sh`, and therefore require the container to provide the `sh`
executable.
> **DO NOTE**: If the container is not running then lifecycle hooks can not run and therefore
> the update is executed without running any lifecycle hooks.
It is possible to execute _pre/post\-check_ and _pre/post\-update_ commands
**inside** every container updated by watchtower.
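For context, the hooks are attached to a container through labels; the following is a minimal, hypothetical Go sketch using the label keys that also appear in the tests further down (the script path and timeout value are placeholders, not part of this change):

```go
package main

import "fmt"

func main() {
	// Hypothetical example: labels a container could carry so that watchtower
	// runs an in-container pre-update hook before replacing it. The label keys
	// match the ones exercised in the tests below; the values are illustrative.
	labels := map[string]string{
		"com.centurylinklabs.watchtower.lifecycle.pre-update":         "/hooks/pre-update.sh",
		"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
	}
	fmt.Println(labels)
}
```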
@@ -63,5 +66,5 @@ If the label value is explicitly set to `0`, the timeout will be disabled.
### Execution failure
The failure of a command to execute, identified by an exit code different than
0, will not prevent watchtower from updating the container. Only an error
0 or 75 (EX_TEMPFAIL), will not prevent watchtower from updating the container. Only an error
log statement containing the exit code will be reported.
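The exit-code handling introduced by this change boils down to a small decision; a self-contained sketch (helper name hypothetical, behaviour mirrors the `waitForExecOrTimeout` change below): 75 (EX_TEMPFAIL) asks watchtower to skip this container's update, any other non-zero code is surfaced as an error, and 0 lets the update proceed.

```go
package main

import "fmt"

// exTempFail mirrors EX_TEMPFAIL from sysexits(3); watchtower treats it as
// "temporarily skip this container's update".
const exTempFail = 75

// interpretPreUpdateExit is a hypothetical helper sketching how this change
// treats the pre-update hook's exit code.
func interpretPreUpdateExit(code int) (skipUpdate bool, err error) {
	switch {
	case code == exTempFail:
		return true, nil
	case code > 0:
		return false, fmt.Errorf("command exited with code %d", code)
	default:
		return false, nil
	}
}

func main() {
	fmt.Println(interpretPreUpdateExit(75)) // true <nil>
	fmt.Println(interpretPreUpdateExit(1))  // false command exited with code 1
}
```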

@@ -2,6 +2,7 @@ package mocks
import (
"errors"
"fmt"
"github.com/containrrr/watchtower/pkg/container"
"time"
@@ -70,12 +71,21 @@ func (client MockClient) RemoveImageByID(id string) error {
// GetContainer is a mock method
func (client MockClient) GetContainer(containerID string) (container.Container, error) {
return container.Container{}, nil
return client.TestData.Containers[0], nil
}
// ExecuteCommand is a mock method
func (client MockClient) ExecuteCommand(containerID string, command string, timeout int) error {
return nil
func (client MockClient) ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error) {
switch command {
case "/PreUpdateReturn0.sh":
return false, nil
case "/PreUpdateReturn1.sh":
return false, fmt.Errorf("command exited with code 1")
case "/PreUpdateReturn75.sh":
return true, nil
default:
return false, nil
}
}
// IsContainerStale is always true for the mock client

@@ -65,13 +65,20 @@ func CreateMockContainerWithDigest(id string, name string, image string, created
}
// CreateMockContainerWithConfig creates a container substitute valid for testing
func CreateMockContainerWithConfig(id string, name string, image string, created time.Time, config *container2.Config) container.Container {
func CreateMockContainerWithConfig(id string, name string, image string, running bool, restarting bool, created time.Time, config *container2.Config) container.Container {
content := types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: id,
Image: image,
Name: name,
State: &types.ContainerState{
Running: running,
Restarting: restarting,
},
Created: created.String(),
HostConfig: &container2.HostConfig{
PortBindings: map[nat.Port][]nat.PortBinding{},
},
},
Config: config,
}

@@ -1,6 +1,7 @@
package actions
import (
"errors"
"github.com/containrrr/watchtower/internal/util"
"github.com/containrrr/watchtower/pkg/container"
"github.com/containrrr/watchtower/pkg/lifecycle"
@@ -81,8 +82,9 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
if params.RollingRestart {
metric.Failed += performRollingRestart(containersToUpdate, client, params)
} else {
metric.Failed += stopContainersInReversedOrder(containersToUpdate, client, params)
metric.Failed += restartContainersInSortedOrder(containersToUpdate, client, params)
imageIDsOfStoppedContainers := make(map[string]bool)
metric.Failed, imageIDsOfStoppedContainers = stopContainersInReversedOrder(containersToUpdate, client, params)
metric.Failed += restartContainersInSortedOrder(containersToUpdate, client, params, imageIDsOfStoppedContainers)
}
metric.Updated = staleCount - (metric.Failed - staleCheckFailed)
@@ -99,15 +101,17 @@ func performRollingRestart(containers []container.Container, client container.Cl
for i := len(containers) - 1; i >= 0; i-- {
if containers[i].ToRestart() {
if err := stopStaleContainer(containers[i], client, params); err != nil {
err := stopStaleContainer(containers[i], client, params)
if err != nil {
failed++
}
} else {
if err := restartStaleContainer(containers[i], client, params); err != nil {
failed++
}
cleanupImageIDs[containers[i].ImageID()] = true
}
}
}
if params.Cleanup {
cleanupImages(client, cleanupImageIDs)
@@ -115,14 +119,18 @@ func performRollingRestart(containers []container.Container, client container.Cl
return failed
}
func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) int {
func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) (int, map[string]bool) {
imageIDsOfStoppedContainers := make(map[string]bool)
failed := 0
for i := len(containers) - 1; i >= 0; i-- {
if err := stopStaleContainer(containers[i], client, params); err != nil {
failed++
} else {
imageIDsOfStoppedContainers[containers[i].ImageID()] = true
}
}
return failed
return failed, imageIDsOfStoppedContainers
}
func stopStaleContainer(container container.Container, client container.Client, params types.UpdateParams) error {
@@ -135,11 +143,16 @@ func stopStaleContainer(container container.Container, client container.Client,
return nil
}
if params.LifecycleHooks {
if err := lifecycle.ExecutePreUpdateCommand(client, container); err != nil {
SkipUpdate, err := lifecycle.ExecutePreUpdateCommand(client, container)
if err != nil {
log.Error(err)
log.Info("Skipping container as the pre-update command failed")
return err
}
if SkipUpdate {
log.Debug("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
return errors.New("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
}
}
if err := client.StopContainer(container, params.Timeout); err != nil {
@@ -149,7 +162,7 @@ func stopStaleContainer(container container.Container, client container.Client,
return nil
}
func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams) int {
func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams, imageIDsOfStoppedContainers map[string]bool) int {
imageIDs := make(map[string]bool)
failed := 0
@@ -158,11 +171,13 @@ func restartContainersInSortedOrder(containers []container.Container, client con
if !c.ToRestart() {
continue
}
if imageIDsOfStoppedContainers[c.ImageID()] {
if err := restartStaleContainer(c, client, params); err != nil {
failed++
}
imageIDs[c.ImageID()] = true
}
}
if params.Cleanup {
cleanupImages(client, imageIDs)

@@ -7,6 +7,7 @@ import (
"github.com/containrrr/watchtower/pkg/types"
container2 "github.com/docker/docker/api/types/container"
cli "github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"time"
. "github.com/containrrr/watchtower/internal/actions/mocks"
@@ -106,6 +107,8 @@ var _ = Describe("the update action", func() {
"test-container-02",
"test-container-02",
"fake-image2:latest",
false,
false,
time.Now(),
&container2.Config{
Labels: map[string]string{
@@ -158,4 +161,187 @@ var _ = Describe("the update action", func() {
})
})
When("watchtower has been instructed to run lifecycle hooks", func() {
When("prupddate script returns 1", func() {
BeforeEach(func() {
client = CreateMockClient(
&TestData{
//NameOfContainerToKeep: "test-container-02",
Containers: []container.Container{
CreateMockContainerWithConfig(
"test-container-02",
"test-container-02",
"fake-image2:latest",
true,
false,
time.Now(),
&container2.Config{
Labels: map[string]string{
"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
"com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
},
ExposedPorts: map[nat.Port]struct{}{},
}),
},
},
dockerClient,
false,
false,
)
})
It("should not update those containers", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, LifecycleHooks: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(0))
})
})
When("prupddate script returns 75", func() {
BeforeEach(func() {
client = CreateMockClient(
&TestData{
//NameOfContainerToKeep: "test-container-02",
Containers: []container.Container{
CreateMockContainerWithConfig(
"test-container-02",
"test-container-02",
"fake-image2:latest",
true,
false,
time.Now(),
&container2.Config{
Labels: map[string]string{
"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
"com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn75.sh",
},
ExposedPorts: map[nat.Port]struct{}{},
}),
},
},
dockerClient,
false,
false,
)
})
It("should not update those containers", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, LifecycleHooks: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(0))
})
})
When("prupddate script returns 0", func() {
BeforeEach(func() {
client = CreateMockClient(
&TestData{
//NameOfContainerToKeep: "test-container-02",
Containers: []container.Container{
CreateMockContainerWithConfig(
"test-container-02",
"test-container-02",
"fake-image2:latest",
true,
false,
time.Now(),
&container2.Config{
Labels: map[string]string{
"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
"com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn0.sh",
},
ExposedPorts: map[nat.Port]struct{}{},
}),
},
},
dockerClient,
false,
false,
)
})
It("should update those containers", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, LifecycleHooks: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
})
When("container is not running", func() {
BeforeEach(func() {
client = CreateMockClient(
&TestData{
//NameOfContainerToKeep: "test-container-02",
Containers: []container.Container{
CreateMockContainerWithConfig(
"test-container-02",
"test-container-02",
"fake-image2:latest",
false,
false,
time.Now(),
&container2.Config{
Labels: map[string]string{
"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
"com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
},
ExposedPorts: map[nat.Port]struct{}{},
}),
},
},
dockerClient,
false,
false,
)
})
It("skip running preupdate", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, LifecycleHooks: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
})
When("container is restarting", func() {
BeforeEach(func() {
client = CreateMockClient(
&TestData{
//NameOfContainerToKeep: "test-container-02",
Containers: []container.Container{
CreateMockContainerWithConfig(
"test-container-02",
"test-container-02",
"fake-image2:latest",
false,
true,
time.Now(),
&container2.Config{
Labels: map[string]string{
"com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
"com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
},
ExposedPorts: map[nat.Port]struct{}{},
}),
},
},
dockerClient,
false,
false,
)
})
It("skip running preupdate", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, LifecycleHooks: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
})
})
})

@@ -31,7 +31,7 @@ type Client interface {
StartContainer(Container) (string, error)
RenameContainer(Container, string) error
IsContainerStale(Container) (bool, error)
ExecuteCommand(containerID string, command string, timeout int) error
ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error)
RemoveImageByID(string) error
WarnOnHeadPullFailed(container Container) bool
}
@@ -356,7 +356,7 @@ func (client dockerClient) RemoveImageByID(id string) error {
return err
}
func (client dockerClient) ExecuteCommand(containerID string, command string, timeout int) error {
func (client dockerClient) ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error) {
bg := context.Background()
// Create the exec
@@ -368,7 +368,7 @@ func (client dockerClient) ExecuteCommand(containerID string, command string, ti
exec, err := client.api.ContainerExecCreate(bg, containerID, execConfig)
if err != nil {
return err
return false, err
}
response, attachErr := client.api.ContainerExecAttach(bg, exec.ID, types.ExecStartCheck{
@@ -383,7 +383,7 @@ func (client dockerClient) ExecuteCommand(containerID string, command string, ti
execStartCheck := types.ExecStartCheck{Detach: false, Tty: true}
err = client.api.ContainerExecStart(bg, exec.ID, execStartCheck)
if err != nil {
return err
return false, err
}
var output string
@@ -400,15 +400,16 @@ func (client dockerClient) ExecuteCommand(containerID string, command string, ti
// Inspect the exec to get the exit code and print a message if the
// exit code is not success.
err = client.waitForExecOrTimeout(bg, exec.ID, output, timeout)
skipUpdate, err := client.waitForExecOrTimeout(bg, exec.ID, output, timeout)
if err != nil {
return err
return true, err
}
return nil
return skipUpdate, nil
}
func (client dockerClient) waitForExecOrTimeout(bg context.Context, ID string, execOutput string, timeout int) error {
func (client dockerClient) waitForExecOrTimeout(bg context.Context, ID string, execOutput string, timeout int) (SkipUpdate bool, err error) {
const ExTempFail = 75
var ctx context.Context
var cancel context.CancelFunc
@@ -430,7 +431,7 @@ func (client dockerClient) waitForExecOrTimeout(bg context.Context, ID string, e
}).Debug("Awaiting timeout or completion")
if err != nil {
return err
return false, err
}
if execInspect.Running == true {
time.Sleep(1 * time.Second)
@@ -439,13 +440,17 @@ func (client dockerClient) waitForExecOrTimeout(bg context.Context, ID string, e
if len(execOutput) > 0 {
log.Infof("Command output:\n%v", execOutput)
}
if execInspect.ExitCode == ExTempFail {
return true, nil
}
if execInspect.ExitCode > 0 {
log.Errorf("Command exited with code %v.", execInspect.ExitCode)
log.Error(execOutput)
return false, fmt.Errorf("Command exited with code %v %s", execInspect.ExitCode, execOutput)
}
break
}
return nil
return false, nil
}
func (client dockerClient) waitForStopOrTimeout(c Container, waitTime time.Duration) error {

@@ -46,6 +46,13 @@ func (c Container) IsRunning() bool {
return c.containerInfo.State.Running
}
// IsRestarting returns a boolean flag indicating whether or not the current
// container is restarting. The status is determined by the value of the
// container's "State.Restarting" property.
func (c Container) IsRestarting() bool {
return c.containerInfo.State.Restarting
}
// Name returns the Docker container name.
func (c Container) Name() string {
return c.containerInfo.Name

@@ -37,7 +37,8 @@ func ExecutePreCheckCommand(client container.Client, container container.Contain
}
log.Debug("Executing pre-check command.")
if err := client.ExecuteCommand(container.ID(), command, 1); err != nil {
_, err := client.ExecuteCommand(container.ID(), command, 1)
if err != nil {
log.Error(err)
}
}
@@ -51,18 +52,24 @@ func ExecutePostCheckCommand(client container.Client, container container.Contai
}
log.Debug("Executing post-check command.")
if err := client.ExecuteCommand(container.ID(), command, 1); err != nil {
_, err := client.ExecuteCommand(container.ID(), command, 1)
if err != nil {
log.Error(err)
}
}
// ExecutePreUpdateCommand tries to run the pre-update lifecycle hook for a single container.
func ExecutePreUpdateCommand(client container.Client, container container.Container) error {
func ExecutePreUpdateCommand(client container.Client, container container.Container) (SkipUpdate bool, err error) {
timeout := container.PreUpdateTimeout()
command := container.GetLifecyclePreUpdateCommand()
if len(command) == 0 {
log.Debug("No pre-update command supplied. Skipping")
return nil
return false, nil
}
if !container.IsRunning() || container.IsRestarting() {
log.Debug("Container is not running. Skipping pre-update command.")
return false, nil
}
log.Debug("Executing pre-update command.")
@@ -84,7 +91,9 @@ func ExecutePostUpdateCommand(client container.Client, newContainerID string) {
}
log.Debug("Executing post-update command.")
if err := client.ExecuteCommand(newContainerID, command, 1); err != nil {
_, err = client.ExecuteCommand(newContainerID, command, 1)
if err != nil {
log.Error(err)
}
}
