mirror of https://github.com/tailscale/tailscale/
cmd/k8s-operator,k8s-operator,kube: Add TSRecorder CRD + controller (#13299)
Deploys tsrecorder images to the operator's cluster. S3 storage is configured via environment variables from a k8s Secret. Currently only supports a single tsrecorder replica, but I've tried to take early steps towards supporting multiple replicas by e.g. having a separate secret for auth and state storage.

Example CR:

```yaml
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: rec
spec:
  enableUI: true
```

Updates #13298

Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
parent
9f9470fc10
commit
98f4dd9857
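The commit message's example only enables the UI; for the S3 path it mentions, a Recorder CR would look roughly like the sketch below. The `spec.storage.s3` field names are inferred from the Go types in this diff (`tsr.Spec.Storage.S3.Endpoint`, `.Bucket`, `.Credentials.Secret.Name`, rendered here in lowerCamel); the CRD schema itself is not in the hunks shown, and the key names inside the credentials Secret are an assumption — the StatefulSet injects whatever keys that Secret holds verbatim via `envFrom`.

```yaml
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: rec
spec:
  storage:
    s3:
      endpoint: s3.us-east-1.amazonaws.com   # hypothetical endpoint
      bucket: example-recordings-bucket      # hypothetical bucket
      credentials:
        secret:
          name: s3-creds
---
apiVersion: v1
kind: Secret
metadata:
  name: s3-creds
  namespace: tailscale
stringData:
  # Hypothetical key names; every key in this Secret is exposed to the
  # tsrecorder container as an environment variable via envFrom.
  AWS_ACCESS_KEY_ID: <access-key-id>
  AWS_SECRET_ACCESS_KEY: <secret-access-key>
```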
@@ -0,0 +1,6 @@
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: recorder
spec:
  enableUI: true
@@ -0,0 +1,375 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"slices"
	"sync"

	"github.com/pkg/errors"
	"go.uber.org/zap"
	xslices "golang.org/x/exp/slices"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"tailscale.com/client/tailscale"
	tsoperator "tailscale.com/k8s-operator"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/kube/kubetypes"
	"tailscale.com/tailcfg"
	"tailscale.com/tstime"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/set"
)

const (
	reasonRecorderCreationFailed = "RecorderCreationFailed"
	reasonRecorderCreated        = "RecorderCreated"
	reasonRecorderInvalid        = "RecorderInvalid"

	currentProfileKey = "_current-profile"
)

var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount)

// RecorderReconciler syncs Recorder statefulsets with their definition in
// Recorder CRs.
type RecorderReconciler struct {
	client.Client
	l           *zap.SugaredLogger
	recorder    record.EventRecorder
	clock       tstime.Clock
	tsNamespace string
	tsClient    tsClient

	mu        sync.Mutex           // protects following
	recorders set.Slice[types.UID] // for recorders gauge
}

func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger {
	return r.l.With("Recorder", name)
}

func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
	logger := r.logger(req.Name)
	logger.Debugf("starting reconcile")
	defer logger.Debugf("reconcile finished")

	tsr := new(tsapi.Recorder)
	err = r.Get(ctx, req.NamespacedName, tsr)
	if apierrors.IsNotFound(err) {
		logger.Debugf("Recorder not found, assuming it was deleted")
		return reconcile.Result{}, nil
	} else if err != nil {
		return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com Recorder: %w", err)
	}
	if markedForDeletion(tsr) {
		logger.Debugf("Recorder is being deleted, cleaning up resources")
		ix := xslices.Index(tsr.Finalizers, FinalizerName)
		if ix < 0 {
			logger.Debugf("no finalizer, nothing to do")
			return reconcile.Result{}, nil
		}

		if done, err := r.maybeCleanup(ctx, tsr); err != nil {
			return reconcile.Result{}, err
		} else if !done {
			logger.Debugf("Recorder resource cleanup not yet finished, will retry...")
			return reconcile.Result{RequeueAfter: shortRequeue}, nil
		}

		tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1)
		if err := r.Update(ctx, tsr); err != nil {
			return reconcile.Result{}, err
		}
		return reconcile.Result{}, nil
	}

	oldTSRStatus := tsr.Status.DeepCopy()
	setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
		tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger)
		if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) {
			// An error encountered here should get returned by the Reconcile function.
			if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil {
				err = errors.Wrap(err, updateErr.Error())
			}
		}
		return reconcile.Result{}, err
	}

	if !slices.Contains(tsr.Finalizers, FinalizerName) {
		// This log line is printed exactly once during initial provisioning,
		// because once the finalizer is in place this block gets skipped. So,
		// this is a nice place to log that the high level, multi-reconcile
		// operation is underway.
		logger.Infof("ensuring Recorder is set up")
		tsr.Finalizers = append(tsr.Finalizers, FinalizerName)
		if err := r.Update(ctx, tsr); err != nil {
			logger.Errorf("error adding finalizer: %v", err)
			return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed)
		}
	}

	if err := r.validate(tsr); err != nil {
		logger.Errorf("error validating Recorder spec: %v", err)
		message := fmt.Sprintf("Recorder is invalid: %s", err)
		r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message)
		return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message)
	}

	if err = r.maybeProvision(ctx, tsr); err != nil {
		logger.Errorf("error creating Recorder resources: %v", err)
		message := fmt.Sprintf("failed creating Recorder: %s", err)
		r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message)
		return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, message)
	}

	logger.Info("Recorder resources synced")
	return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated)
}

func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Recorder) error {
	logger := r.logger(tsr.Name)

	r.mu.Lock()
	r.recorders.Add(tsr.UID)
	gaugeRecorderResources.Set(int64(r.recorders.Len()))
	r.mu.Unlock()

	if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil {
		return fmt.Errorf("error creating secrets: %w", err)
	}
	// State secret is precreated so we can use the Recorder CR as its owner ref.
	sec := tsrStateSecret(tsr, r.tsNamespace)
	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) {
		s.ObjectMeta.Labels = sec.ObjectMeta.Labels
		s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations
		s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences
	}); err != nil {
		return fmt.Errorf("error creating state Secret: %w", err)
	}
	sa := tsrServiceAccount(tsr, r.tsNamespace)
	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) {
		s.ObjectMeta.Labels = sa.ObjectMeta.Labels
		s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations
		s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences
	}); err != nil {
		return fmt.Errorf("error creating ServiceAccount: %w", err)
	}
	role := tsrRole(tsr, r.tsNamespace)
	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
		r.ObjectMeta.Labels = role.ObjectMeta.Labels
		r.ObjectMeta.Annotations = role.ObjectMeta.Annotations
		r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences
		r.Rules = role.Rules
	}); err != nil {
		return fmt.Errorf("error creating Role: %w", err)
	}
	roleBinding := tsrRoleBinding(tsr, r.tsNamespace)
	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) {
		r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
		r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations
		r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences
		r.RoleRef = roleBinding.RoleRef
		r.Subjects = roleBinding.Subjects
	}); err != nil {
		return fmt.Errorf("error creating RoleBinding: %w", err)
	}
	ss := tsrStatefulSet(tsr, r.tsNamespace)
	if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) {
		s.ObjectMeta.Labels = ss.ObjectMeta.Labels
		s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations
		s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences
		s.Spec = ss.Spec
	}); err != nil {
		return fmt.Errorf("error creating StatefulSet: %w", err)
	}

	var devices []tsapi.TailnetDevice

	device, ok, err := r.getDeviceInfo(ctx, tsr.Name)
	if err != nil {
		return fmt.Errorf("failed to get device info: %w", err)
	}
	if !ok {
		logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth")
		return nil
	}

	devices = append(devices, device)

	tsr.Status.Devices = devices

	return nil
}

// maybeCleanup just deletes the device from the tailnet. All the kubernetes
// resources linked to a Recorder will get cleaned up via owner references
// (which we can use because they are all in the same namespace).
func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) {
	logger := r.logger(tsr.Name)

	id, _, ok, err := r.getNodeMetadata(ctx, tsr.Name)
	if err != nil {
		return false, err
	}
	if !ok {
		logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name)
		r.mu.Lock()
		r.recorders.Remove(tsr.UID)
		gaugeRecorderResources.Set(int64(r.recorders.Len()))
		r.mu.Unlock()
		return true, nil
	}

	logger.Debugf("deleting device %s from control", string(id))
	if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil {
		errResp := &tailscale.ErrResponse{}
		if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
			logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id))
		} else {
			return false, fmt.Errorf("error deleting device: %w", err)
		}
	} else {
		logger.Debugf("device %s deleted from control", string(id))
	}

	// Unlike most log entries in the reconcile loop, this will get printed
	// exactly once at the very end of cleanup, because the final step of
	// cleanup removes the tailscale finalizer, which will make all future
	// reconciles exit early.
	logger.Infof("cleaned up Recorder resources")
	r.mu.Lock()
	r.recorders.Remove(tsr.UID)
	gaugeRecorderResources.Set(int64(r.recorders.Len()))
	r.mu.Unlock()
	return true, nil
}

func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error {
	logger := r.logger(tsr.Name)
	key := types.NamespacedName{
		Namespace: r.tsNamespace,
		Name:      tsr.Name,
	}
	if err := r.Get(ctx, key, &corev1.Secret{}); err == nil {
		// No updates, already created the auth key.
		logger.Debugf("auth Secret %s already exists", key.Name)
		return nil
	} else if !apierrors.IsNotFound(err) {
		return err
	}

	// Create the auth key Secret which is going to be used by the StatefulSet
	// to authenticate with Tailscale.
	logger.Debugf("creating authkey for new Recorder")
	tags := tsr.Spec.Tags
	if len(tags) == 0 {
		tags = tsapi.Tags{"tag:k8s"}
	}
	authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify())
	if err != nil {
		return err
	}

	logger.Debug("creating a new Secret for the Recorder")
	if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil {
		return err
	}

	return nil
}

func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error {
	if !tsr.Spec.EnableUI && tsr.Spec.Storage.S3 == nil {
		return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible")
	}

	return nil
}

// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName
// is expected to always be non-empty if the node ID is, but not required.
func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: r.tsNamespace,
			Name:      fmt.Sprintf("%s-0", tsrName),
		},
	}
	if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
		if apierrors.IsNotFound(err) {
			return "", "", false, nil
		}

		return "", "", false, err
	}

	// TODO(tomhjp): Should maybe use ipn to parse the following info instead.
	currentProfile, ok := secret.Data[currentProfileKey]
	if !ok {
		return "", "", false, nil
	}
	profileBytes, ok := secret.Data[string(currentProfile)]
	if !ok {
		return "", "", false, nil
	}
	var profile profile
	if err := json.Unmarshal(profileBytes, &profile); err != nil {
		return "", "", false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err)
	}

	ok = profile.Config.NodeID != ""
	return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil
}

func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.TailnetDevice, ok bool, err error) {
	nodeID, dnsName, ok, err := r.getNodeMetadata(ctx, tsrName)
	if !ok || err != nil {
		return tsapi.TailnetDevice{}, false, err
	}

	// TODO(tomhjp): The profile info doesn't include addresses, which is why we
	// need the API. Should we instead update the profile to include addresses?
	device, err := r.tsClient.Device(ctx, string(nodeID), nil)
	if err != nil {
		return tsapi.TailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err)
	}

	d = tsapi.TailnetDevice{
		Hostname:   device.Hostname,
		TailnetIPs: device.Addresses,
	}
	if dnsName != "" {
		d.URL = fmt.Sprintf("https://%s", dnsName)
	}

	return d, true, nil
}

type profile struct {
	Config struct {
		NodeID      string `json:"NodeID"`
		UserProfile struct {
			LoginName string `json:"LoginName"`
		} `json:"UserProfile"`
	} `json:"Config"`
}

func markedForDeletion(tsr *tsapi.Recorder) bool {
	return !tsr.DeletionTimestamp.IsZero()
}
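The wiring of this reconciler into the operator's controller manager happens outside the hunks shown here. A minimal sketch of what that registration could look like, assuming a standard controller-runtime manager and an already-constructed `RecorderReconciler` (the helper name and setup are hypothetical, not part of this diff):

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
)

// registerRecorderReconciler is a hypothetical helper: it makes the manager
// watch Recorder CRs and route their reconcile requests to r.Reconcile.
func registerRecorderReconciler(mgr ctrl.Manager, r *RecorderReconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&tsapi.Recorder{}).
		Complete(r)
}
```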
@@ -0,0 +1,278 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/types/ptr"
	"tailscale.com/version"
)

func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet {
	return &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:            tsr.Name,
			Namespace:       namespace,
			Labels:          labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels),
			OwnerReferences: tsrOwnerReference(tsr),
			Annotations:     tsr.Spec.StatefulSet.Annotations,
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: ptr.To[int32](1),
			Selector: &metav1.LabelSelector{
				MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:        tsr.Name,
					Namespace:   namespace,
					Labels:      labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
					Annotations: tsr.Spec.StatefulSet.Pod.Annotations,
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: tsr.Name,
					Affinity:           tsr.Spec.StatefulSet.Pod.Affinity,
					SecurityContext:    tsr.Spec.StatefulSet.Pod.SecurityContext,
					ImagePullSecrets:   tsr.Spec.StatefulSet.Pod.ImagePullSecrets,
					NodeSelector:       tsr.Spec.StatefulSet.Pod.NodeSelector,
					Tolerations:        tsr.Spec.StatefulSet.Pod.Tolerations,
					Containers: []corev1.Container{
						{
							Name: "recorder",
							Image: func() string {
								image := tsr.Spec.StatefulSet.Pod.Container.Image
								if image == "" {
									image = fmt.Sprintf("tailscale/tsrecorder:%s", selfVersionImageTag())
								}

								return image
							}(),
							ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy,
							Resources:       tsr.Spec.StatefulSet.Pod.Container.Resources,
							SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext,
							Env:             env(tsr),
							EnvFrom: func() []corev1.EnvFromSource {
								if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" {
									return nil
								}

								return []corev1.EnvFromSource{{
									SecretRef: &corev1.SecretEnvSource{
										LocalObjectReference: corev1.LocalObjectReference{
											Name: tsr.Spec.Storage.S3.Credentials.Secret.Name,
										},
									},
								}}
							}(),
							Command: []string{"/tsrecorder"},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "data",
									MountPath: "/data",
									ReadOnly:  false,
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "data",
							VolumeSource: corev1.VolumeSource{
								EmptyDir: &corev1.EmptyDirVolumeSource{},
							},
						},
					},
				},
			},
		},
	}
}

func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount {
	return &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:            tsr.Name,
			Namespace:       namespace,
			Labels:          labels("recorder", tsr.Name, nil),
			OwnerReferences: tsrOwnerReference(tsr),
		},
	}
}

func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role {
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:            tsr.Name,
			Namespace:       namespace,
			Labels:          labels("recorder", tsr.Name, nil),
			OwnerReferences: tsrOwnerReference(tsr),
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"secrets"},
				Verbs: []string{
					"get",
					"patch",
					"update",
				},
				ResourceNames: []string{
					tsr.Name,                      // Contains the auth key.
					fmt.Sprintf("%s-0", tsr.Name), // Contains the node state.
				},
			},
		},
	}
}

func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding {
	return &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:            tsr.Name,
			Namespace:       namespace,
			Labels:          labels("recorder", tsr.Name, nil),
			OwnerReferences: tsrOwnerReference(tsr),
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      tsr.Name,
				Namespace: namespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind: "Role",
			Name: tsr.Name,
		},
	}
}

func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:       namespace,
			Name:            tsr.Name,
			Labels:          labels("recorder", tsr.Name, nil),
			OwnerReferences: tsrOwnerReference(tsr),
		},
		StringData: map[string]string{
			"authkey": authKey,
		},
	}
}

func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:            fmt.Sprintf("%s-0", tsr.Name),
			Namespace:       namespace,
			Labels:          labels("recorder", tsr.Name, nil),
			OwnerReferences: tsrOwnerReference(tsr),
		},
	}
}

func env(tsr *tsapi.Recorder) []corev1.EnvVar {
	envs := []corev1.EnvVar{
		{
			Name: "TS_AUTHKEY",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: tsr.Name,
					},
					Key: "authkey",
				},
			},
		},
		{
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					// Secret is named after the pod.
					FieldPath: "metadata.name",
				},
			},
		},
		{
			Name:  "TS_STATE",
			Value: "kube:$(POD_NAME)",
		},
		{
			Name:  "TSRECORDER_HOSTNAME",
			Value: "$(POD_NAME)",
		},
	}

	for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env {
		envs = append(envs, corev1.EnvVar{
			Name:  string(env.Name),
			Value: env.Value,
		})
	}

	if tsr.Spec.Storage.S3 != nil {
		envs = append(envs,
			corev1.EnvVar{
				Name:  "TSRECORDER_DST",
				Value: fmt.Sprintf("s3://%s", tsr.Spec.Storage.S3.Endpoint),
			},
			corev1.EnvVar{
				Name:  "TSRECORDER_BUCKET",
				Value: tsr.Spec.Storage.S3.Bucket,
			},
		)
	} else {
		envs = append(envs, corev1.EnvVar{
			Name:  "TSRECORDER_DST",
			Value: "/data/recordings",
		})
	}

	if tsr.Spec.EnableUI {
		envs = append(envs, corev1.EnvVar{
			Name:  "TSRECORDER_UI",
			Value: "true",
		})
	}

	return envs
}

func labels(app, instance string, customLabels map[string]string) map[string]string {
	l := make(map[string]string, len(customLabels)+3)
	for k, v := range customLabels {
		l[k] = v
	}

	// ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
	l["app.kubernetes.io/name"] = app
	l["app.kubernetes.io/instance"] = instance
	l["app.kubernetes.io/managed-by"] = "tailscale-operator"

	return l
}

func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference {
	return []metav1.OwnerReference{*metav1.NewControllerRef(owner, tsapi.SchemeGroupVersion.WithKind("Recorder"))}
}

// selfVersionImageTag returns the container image tag of the running operator
// build.
func selfVersionImageTag() string {
	meta := version.GetMeta()
	var versionPrefix string
	if meta.UnstableBranch {
		versionPrefix = "unstable-"
	}
	return fmt.Sprintf("%sv%s", versionPrefix, meta.MajorMinorPatch)
}
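To make the interplay between `env()` and the StatefulSet builder concrete, this is roughly the container environment that results for an S3-backed Recorder named `rec` (an illustrative rendering of the code above, not part of the diff; the endpoint and bucket values are placeholders):

```yaml
env:
  - name: TS_AUTHKEY
    valueFrom:
      secretKeyRef:
        name: rec            # The auth Secret shares the Recorder's name.
        key: authkey
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: TS_STATE
    value: kube:$(POD_NAME)  # Kubernetes expands $(POD_NAME) at pod start,
  - name: TSRECORDER_HOSTNAME
    value: $(POD_NAME)       # so state lands in the pod-named Secret rec-0.
  - name: TSRECORDER_DST
    value: s3://s3.us-east-1.amazonaws.com   # Hypothetical endpoint.
  - name: TSRECORDER_BUCKET
    value: example-recordings-bucket         # Hypothetical bucket.
```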
@@ -0,0 +1,143 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/types/ptr"
)

func TestRecorderSpecs(t *testing.T) {
	t.Run("ensure spec fields are passed through correctly", func(t *testing.T) {
		tsr := &tsapi.Recorder{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: tsapi.RecorderSpec{
				StatefulSet: tsapi.RecorderStatefulSet{
					Labels: map[string]string{
						"ss-label-key": "ss-label-value",
					},
					Annotations: map[string]string{
						"ss-annotation-key": "ss-annotation-value",
					},
					Pod: tsapi.RecorderPod{
						Labels: map[string]string{
							"pod-label-key": "pod-label-value",
						},
						Annotations: map[string]string{
							"pod-annotation-key": "pod-annotation-value",
						},
						Affinity: &corev1.Affinity{
							PodAffinity: &corev1.PodAffinity{
								RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
									LabelSelector: &metav1.LabelSelector{
										MatchLabels: map[string]string{
											"match-label": "match-value",
										},
									},
								}},
							},
						},
						SecurityContext: &corev1.PodSecurityContext{
							RunAsUser: ptr.To[int64](1000),
						},
						ImagePullSecrets: []corev1.LocalObjectReference{{
							Name: "img-pull",
						}},
						NodeSelector: map[string]string{
							"some-node": "selector",
						},
						Tolerations: []corev1.Toleration{{
							Key:               "key",
							Value:             "value",
							TolerationSeconds: ptr.To[int64](60),
						}},
						Container: tsapi.RecorderContainer{
							Env: []tsapi.Env{{
								Name:  "some_env",
								Value: "env_value",
							}},
							Image:           "custom-image",
							ImagePullPolicy: corev1.PullAlways,
							SecurityContext: &corev1.SecurityContext{
								Capabilities: &corev1.Capabilities{
									Add: []corev1.Capability{
										"NET_ADMIN",
									},
								},
							},
							Resources: corev1.ResourceRequirements{
								Limits: corev1.ResourceList{
									corev1.ResourceCPU: resource.MustParse("100m"),
								},
								Requests: corev1.ResourceList{
									corev1.ResourceCPU: resource.MustParse("50m"),
								},
							},
						},
					},
				},
			},
		}

		ss := tsrStatefulSet(tsr, tsNamespace)

		// StatefulSet-level.
		if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}

		// Pod-level.
		if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Annotations, tsr.Spec.StatefulSet.Pod.Annotations); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.SecurityContext, tsr.Spec.StatefulSet.Pod.SecurityContext); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.ImagePullSecrets, tsr.Spec.StatefulSet.Pod.ImagePullSecrets); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.NodeSelector, tsr.Spec.StatefulSet.Pod.NodeSelector); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Tolerations, tsr.Spec.StatefulSet.Pod.Tolerations); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}

		// Container-level.
		if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].ImagePullPolicy, tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].SecurityContext, tsr.Spec.StatefulSet.Pod.Container.SecurityContext); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
		if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" {
			t.Errorf("(-got +want):\n%s", diff)
		}
	})
}
@@ -0,0 +1,162 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"encoding/json"
	"testing"

	"github.com/google/go-cmp/cmp"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	tsoperator "tailscale.com/k8s-operator"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/tstest"
)

const tsNamespace = "tailscale"

func TestRecorder(t *testing.T) {
	tsr := &tsapi.Recorder{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test",
			Finalizers: []string{"tailscale.com/finalizer"},
		},
	}

	fc := fake.NewClientBuilder().
		WithScheme(tsapi.GlobalScheme).
		WithObjects(tsr).
		WithStatusSubresource(tsr).
		Build()
	tsClient := &fakeTSClient{}
	zl, _ := zap.NewDevelopment()
	fr := record.NewFakeRecorder(1)
	cl := tstest.NewClock(tstest.ClockOpts{})
	reconciler := &RecorderReconciler{
		tsNamespace: tsNamespace,
		Client:      fc,
		tsClient:    tsClient,
		recorder:    fr,
		l:           zl.Sugar(),
		clock:       cl,
	}

	t.Run("invalid spec gives an error condition", func(t *testing.T) {
		expectReconciled(t, reconciler, "", tsr.Name)

		msg := "Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible"
		tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse, reasonRecorderInvalid, msg, 0, cl, zl.Sugar())
		expectEqual(t, fc, tsr, nil)
		if expected := 0; reconciler.recorders.Len() != expected {
			t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
		}
		expectRecorderResources(t, fc, tsr, false)

		expectedEvent := "Warning RecorderInvalid Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible"
		expectEvents(t, fr, []string{expectedEvent})
	})

	t.Run("observe Ready=true status condition for a valid spec", func(t *testing.T) {
		tsr.Spec.EnableUI = true
		mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) {
			t.Spec = tsr.Spec
		})

		expectReconciled(t, reconciler, "", tsr.Name)

		tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated, 0, cl, zl.Sugar())
		expectEqual(t, fc, tsr, nil)
		if expected := 1; reconciler.recorders.Len() != expected {
			t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
		}
		expectRecorderResources(t, fc, tsr, true)
	})

	t.Run("populate node info in state secret, and see it appear in status", func(t *testing.T) {
		bytes, err := json.Marshal(map[string]any{
			"Config": map[string]any{
				"NodeID": "nodeid-123",
				"UserProfile": map[string]any{
					"LoginName": "test-0.example.ts.net",
				},
			},
		})
		if err != nil {
			t.Fatal(err)
		}

		const key = "profile-abc"
		mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) {
			s.Data = map[string][]byte{
				currentProfileKey: []byte(key),
				key:               bytes,
			}
		})

		expectReconciled(t, reconciler, "", tsr.Name)
		tsr.Status.Devices = []tsapi.TailnetDevice{
			{
				Hostname:   "test-device",
				TailnetIPs: []string{"1.2.3.4", "::1"},
				URL:        "https://test-0.example.ts.net",
			},
		}
		expectEqual(t, fc, tsr, nil)
	})

	t.Run("delete the Recorder and observe cleanup", func(t *testing.T) {
		if err := fc.Delete(context.Background(), tsr); err != nil {
			t.Fatal(err)
		}

		expectReconciled(t, reconciler, "", tsr.Name)

		expectMissing[tsapi.Recorder](t, fc, "", tsr.Name)
		if expected := 0; reconciler.recorders.Len() != expected {
			t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
		}
		if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" {
			t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff)
		}
		// The fake client does not clean up objects whose owner has been
		// deleted, so we can't test for the owned resources getting deleted.
	})
}

func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) {
	t.Helper()

	auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey")
	state := tsrStateSecret(tsr, tsNamespace)
	role := tsrRole(tsr, tsNamespace)
	roleBinding := tsrRoleBinding(tsr, tsNamespace)
	serviceAccount := tsrServiceAccount(tsr, tsNamespace)
	statefulSet := tsrStatefulSet(tsr, tsNamespace)

	if shouldExist {
		expectEqual(t, fc, auth, nil)
		expectEqual(t, fc, state, nil)
		expectEqual(t, fc, role, nil)
		expectEqual(t, fc, roleBinding, nil)
		expectEqual(t, fc, serviceAccount, nil)
		expectEqual(t, fc, statefulSet, nil)
	} else {
		expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name)
		expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name)
		expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name)
		expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name)
		expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name)
		expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name)
	}
}