cmd/k8s-operator,k8s-operator,kube: Add TSRecorder CRD + controller (#13299)

Deploys tsrecorder images to the operator's cluster. S3 storage is
configured via environment variables from a k8s Secret. Currently
only supports a single tsrecorder replica, but I've tried to take early
steps towards supporting multiple replicas by e.g. having a separate
secret for auth and state storage.

Example CR:

```yaml
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: rec
spec:
  enableUI: true
```
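
To use S3 storage instead of the default ephemeral local volume, the CR can reference a credentials Secret in the operator's namespace. A sketch using the storage fields added by this change (the bucket and Secret names are placeholders):

```yaml
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: rec
spec:
  tags:
    - tag:k8s
  storage:
    s3:
      endpoint: s3.us-east-1.amazonaws.com
      bucket: example-recordings       # placeholder bucket name
      credentials:
        secret:
          name: recorder-s3-creds      # placeholder Secret in the operator's namespace
```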

Updates #13298

Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>

@ -27,6 +27,9 @@ rules:
- apiGroups: ["tailscale.com"] - apiGroups: ["tailscale.com"]
resources: ["dnsconfigs", "dnsconfigs/status"] resources: ["dnsconfigs", "dnsconfigs/status"]
verbs: ["get", "list", "watch", "update"] verbs: ["get", "list", "watch", "update"]
- apiGroups: ["tailscale.com"]
resources: ["recorders", "recorders/status"]
verbs: ["get", "list", "watch", "update"]
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
@ -56,6 +59,9 @@ rules:
- apiGroups: ["discovery.k8s.io"] - apiGroups: ["discovery.k8s.io"]
resources: ["endpointslices"] resources: ["endpointslices"]
verbs: ["get", "list", "watch"] verbs: ["get", "list", "watch"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings"]
verbs: ["get", "create", "patch", "update", "list", "watch"]
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: RoleBinding

@ -89,14 +89,14 @@ spec:
type: object
properties:
image:
description: Nameserver image. Defaults to tailscale/k8s-nameserver:unstable.
type: object
properties:
repo:
description: Repo defaults to tailscale/k8s-nameserver.
type: string
tag:
description: Tag defaults to unstable.
type: string
status:
description: |-

File diff suppressed because it is too large.

@ -0,0 +1,6 @@
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
name: recorder
spec:
enableUI: true

File diff suppressed because it is too large.

@ -24,10 +24,12 @@ const (
connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml"
proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml"
dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml"
recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml"
helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates"
connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml"
proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml"
dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml"
recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml"
helmConditionalStart = "{{ if .Values.installCRDs -}}\n"
helmConditionalEnd = "{{- end -}}"
@ -111,7 +113,7 @@ func main() {
}
}
// generate places tailscale.com CRDs (currently Connector, ProxyClass, DNSConfig, Recorder) into
// the Helm chart templates behind .Values.installCRDs=true condition (true by
// default).
func generate(baseDir string) error {
@ -137,28 +139,32 @@ func generate(baseDir string) error {
}
return nil
}
for _, crd := range []struct {
crdPath, templatePath string
}{
{connectorCRDPath, connectorCRDHelmTemplatePath},
{proxyClassCRDPath, proxyClassCRDHelmTemplatePath},
{dnsConfigCRDPath, dnsConfigCRDHelmTemplatePath},
{recorderCRDPath, recorderCRDHelmTemplatePath},
} {
if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil {
return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err)
}
}
return nil
}
func cleanup(baseDir string) error {
log.Print("Cleaning up CRD from Helm templates")
for _, path := range []string{
connectorCRDHelmTemplatePath,
proxyClassCRDHelmTemplatePath,
dnsConfigCRDHelmTemplatePath,
recorderCRDHelmTemplatePath,
} {
if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error cleaning up %s: %w", path, err)
}
}
return nil
}

@ -59,6 +59,9 @@ func Test_generate(t *testing.T) {
if !strings.Contains(installContentsWithCRD.String(), "name: dnsconfigs.tailscale.com") {
t.Errorf("DNSConfig CRD not found in default chart install")
}
if !strings.Contains(installContentsWithCRD.String(), "name: recorders.tailscale.com") {
t.Errorf("Recorder CRD not found in default chart install")
}
// Test that CRDs can be excluded from Helm chart install
installContentsWithoutCRD := bytes.NewBuffer([]byte{})
@ -77,4 +80,7 @@ func Test_generate(t *testing.T) {
if strings.Contains(installContentsWithoutCRD.String(), "name: dnsconfigs.tailscale.com") {
t.Errorf("DNSConfig CRD found in chart install that should not contain a CRD")
}
if strings.Contains(installContentsWithoutCRD.String(), "name: recorders.tailscale.com") {
t.Errorf("Recorder CRD found in chart install that should not contain a CRD")
}
}

@ -33,7 +33,7 @@ func TestNameserverReconciler(t *testing.T) {
},
Spec: tsapi.DNSConfigSpec{
Nameserver: &tsapi.Nameserver{
Image: &tsapi.NameserverImage{
Repo: "test",
Tag: "v0.0.1",
},

@ -22,6 +22,7 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1" discoveryv1 "k8s.io/api/discovery/v1"
networkingv1 "k8s.io/api/networking/v1" networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/builder"
@ -241,6 +242,8 @@ func runReconcilers(opts reconcilerOpts) {
&appsv1.StatefulSet{}: nsFilter, &appsv1.StatefulSet{}: nsFilter,
&appsv1.Deployment{}: nsFilter, &appsv1.Deployment{}: nsFilter,
&discoveryv1.EndpointSlice{}: nsFilter, &discoveryv1.EndpointSlice{}: nsFilter,
&rbacv1.Role{}: nsFilter,
&rbacv1.RoleBinding{}: nsFilter,
}, },
}, },
Scheme: tsapi.GlobalScheme, Scheme: tsapi.GlobalScheme,
@ -389,6 +392,28 @@ func runReconcilers(opts reconcilerOpts) {
if err != nil {
startlog.Fatalf("could not create DNS records reconciler: %v", err)
}
// Recorder reconciler.
recorderFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.Recorder{})
err = builder.ControllerManagedBy(mgr).
For(&tsapi.Recorder{}).
Watches(&appsv1.StatefulSet{}, recorderFilter).
Watches(&corev1.ServiceAccount{}, recorderFilter).
Watches(&corev1.Secret{}, recorderFilter).
Watches(&rbacv1.Role{}, recorderFilter).
Watches(&rbacv1.RoleBinding{}, recorderFilter).
Complete(&RecorderReconciler{
recorder: eventRecorder,
tsNamespace: opts.tailscaleNamespace,
Client: mgr.GetClient(),
l: opts.log.Named("recorder-reconciler"),
clock: tstime.DefaultClock{},
tsClient: opts.tsClient,
})
if err != nil {
startlog.Fatalf("could not create Recorder reconciler: %v", err)
}
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
startlog.Fatalf("could not start manager: %v", err)
@ -525,6 +550,7 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c
type tsClient interface {
CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error)
DeleteDevice(ctx context.Context, nodeStableID string) error
}

@ -343,7 +343,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *
if len(tags) == 0 {
tags = a.defaultTags
}
authKey, err = newAuthKey(ctx, a.tsClient, tags)
if err != nil {
return "", "", nil, err
}
@ -419,6 +419,11 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map
if sec == nil {
return "", "", nil, nil
}
return deviceInfo(sec)
}
func deviceInfo(sec *corev1.Secret) (id tailcfg.StableNodeID, hostname string, ips []string, err error) {
id = tailcfg.StableNodeID(sec.Data["device_id"])
if id == "" {
return "", "", nil, nil
@ -442,7 +447,7 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map
return id, hostname, ips, nil
}
func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) {
caps := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
@ -453,7 +458,7 @@ func (a *tailscaleSTSReconciler) newAuthKey(ctx context.Context, tags []string)
},
}
key, _, err := tsClient.CreateKey(ctx, caps)
if err != nil {
return "", err
}

@ -9,6 +9,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"net/netip" "net/netip"
"reflect"
"strings" "strings"
"sync" "sync"
"testing" "testing"
@ -487,7 +488,7 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want
modifier(got)
}
if diff := cmp.Diff(got, want); diff != "" {
t.Fatalf("unexpected %s (-got +want):\n%s", reflect.TypeOf(want).Elem().Name(), diff)
}
}
@ -498,7 +499,7 @@ func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns
Name: name,
Namespace: ns,
}, obj); !apierrors.IsNotFound(err) {
t.Fatalf("%s %s/%s unexpectedly present, wanted missing", reflect.TypeOf(obj).Elem().Name(), ns, name)
}
}
@ -592,6 +593,17 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili
return "secret-authkey", k, nil return "secret-authkey", k, nil
} }
func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) {
return &tailscale.Device{
DeviceID: deviceID,
Hostname: "test-device",
Addresses: []string{
"1.2.3.4",
"::1",
},
}, nil
}
func (c *fakeTSClient) DeleteDevice(ctx context.Context, deviceID string) error {
c.Lock()
defer c.Unlock()

@ -0,0 +1,375 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package main
import (
"context"
"encoding/json"
"fmt"
"net/http"
"slices"
"sync"
"github.com/pkg/errors"
"go.uber.org/zap"
xslices "golang.org/x/exp/slices"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/client/tailscale"
tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
)
const (
reasonRecorderCreationFailed = "RecorderCreationFailed"
reasonRecorderCreated = "RecorderCreated"
reasonRecorderInvalid = "RecorderInvalid"
currentProfileKey = "_current-profile"
)
var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount)
// RecorderReconciler syncs Recorder statefulsets with their definition in
// Recorder CRs.
type RecorderReconciler struct {
client.Client
l *zap.SugaredLogger
recorder record.EventRecorder
clock tstime.Clock
tsNamespace string
tsClient tsClient
mu sync.Mutex // protects following
recorders set.Slice[types.UID] // for recorders gauge
}
func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger {
return r.l.With("Recorder", name)
}
func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
logger := r.logger(req.Name)
logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished")
tsr := new(tsapi.Recorder)
err = r.Get(ctx, req.NamespacedName, tsr)
if apierrors.IsNotFound(err) {
logger.Debugf("Recorder not found, assuming it was deleted")
return reconcile.Result{}, nil
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com Recorder: %w", err)
}
if markedForDeletion(tsr) {
logger.Debugf("Recorder is being deleted, cleaning up resources")
ix := xslices.Index(tsr.Finalizers, FinalizerName)
if ix < 0 {
logger.Debugf("no finalizer, nothing to do")
return reconcile.Result{}, nil
}
if done, err := r.maybeCleanup(ctx, tsr); err != nil {
return reconcile.Result{}, err
} else if !done {
logger.Debugf("Recorder resource cleanup not yet finished, will retry...")
return reconcile.Result{RequeueAfter: shortRequeue}, nil
}
tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1)
if err := r.Update(ctx, tsr); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
oldTSRStatus := tsr.Status.DeepCopy()
setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger)
if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil {
err = errors.Wrap(err, updateErr.Error())
}
}
return reconcile.Result{}, err
}
if !slices.Contains(tsr.Finalizers, FinalizerName) {
// This log line is printed exactly once during initial provisioning,
// because once the finalizer is in place this block gets skipped. So,
// this is a nice place to log that the high level, multi-reconcile
// operation is underway.
logger.Infof("ensuring Recorder is set up")
tsr.Finalizers = append(tsr.Finalizers, FinalizerName)
if err := r.Update(ctx, tsr); err != nil {
logger.Errorf("error adding finalizer: %w", err)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed)
}
}
if err := r.validate(tsr); err != nil {
logger.Errorf("error validating Recorder spec: %w", err)
message := fmt.Sprintf("Recorder is invalid: %s", err)
r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message)
}
if err = r.maybeProvision(ctx, tsr); err != nil {
logger.Errorf("error creating Recorder resources: %w", err)
message := fmt.Sprintf("failed creating Recorder: %s", err)
r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, message)
}
logger.Info("Recorder resources synced")
return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated)
}
func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Recorder) error {
logger := r.logger(tsr.Name)
r.mu.Lock()
r.recorders.Add(tsr.UID)
gaugeRecorderResources.Set(int64(r.recorders.Len()))
r.mu.Unlock()
if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil {
return fmt.Errorf("error creating secrets: %w", err)
}
// State secret is precreated so we can use the Recorder CR as its owner ref.
sec := tsrStateSecret(tsr, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) {
s.ObjectMeta.Labels = sec.ObjectMeta.Labels
s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations
s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences
}); err != nil {
return fmt.Errorf("error creating state Secret: %w", err)
}
sa := tsrServiceAccount(tsr, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) {
s.ObjectMeta.Labels = sa.ObjectMeta.Labels
s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations
s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences
}); err != nil {
return fmt.Errorf("error creating ServiceAccount: %w", err)
}
role := tsrRole(tsr, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
r.ObjectMeta.Labels = role.ObjectMeta.Labels
r.ObjectMeta.Annotations = role.ObjectMeta.Annotations
r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences
r.Rules = role.Rules
}); err != nil {
return fmt.Errorf("error creating Role: %w", err)
}
roleBinding := tsrRoleBinding(tsr, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) {
r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels
r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations
r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences
r.RoleRef = roleBinding.RoleRef
r.Subjects = roleBinding.Subjects
}); err != nil {
return fmt.Errorf("error creating RoleBinding: %w", err)
}
ss := tsrStatefulSet(tsr, r.tsNamespace)
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) {
s.ObjectMeta.Labels = ss.ObjectMeta.Labels
s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations
s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences
s.Spec = ss.Spec
}); err != nil {
return fmt.Errorf("error creating StatefulSet: %w", err)
}
var devices []tsapi.TailnetDevice
device, ok, err := r.getDeviceInfo(ctx, tsr.Name)
if err != nil {
return fmt.Errorf("failed to get device info: %w", err)
}
if !ok {
logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth")
return nil
}
devices = append(devices, device)
tsr.Status.Devices = devices
return nil
}
// maybeCleanup just deletes the device from the tailnet. All the kubernetes
// resources linked to a Recorder will get cleaned up via owner references
// (which we can use because they are all in the same namespace).
func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) {
logger := r.logger(tsr.Name)
id, _, ok, err := r.getNodeMetadata(ctx, tsr.Name)
if err != nil {
return false, err
}
if !ok {
logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name)
r.mu.Lock()
r.recorders.Remove(tsr.UID)
gaugeRecorderResources.Set(int64(r.recorders.Len()))
r.mu.Unlock()
return true, nil
}
logger.Debugf("deleting device %s from control", string(id))
if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil {
errResp := &tailscale.ErrResponse{}
if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id))
} else {
return false, fmt.Errorf("error deleting device: %w", err)
}
} else {
logger.Debugf("device %s deleted from control", string(id))
}
// Unlike most log entries in the reconcile loop, this will get printed
// exactly once at the very end of cleanup, because the final step of
// cleanup removes the tailscale finalizer, which will make all future
// reconciles exit early.
logger.Infof("cleaned up Recorder resources")
r.mu.Lock()
r.recorders.Remove(tsr.UID)
gaugeRecorderResources.Set(int64(r.recorders.Len()))
r.mu.Unlock()
return true, nil
}
func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error {
logger := r.logger(tsr.Name)
key := types.NamespacedName{
Namespace: r.tsNamespace,
Name: tsr.Name,
}
if err := r.Get(ctx, key, &corev1.Secret{}); err == nil {
// No updates, already created the auth key.
logger.Debugf("auth Secret %s already exists", key.Name)
return nil
} else if !apierrors.IsNotFound(err) {
return err
}
// Create the auth key Secret which is going to be used by the StatefulSet
// to authenticate with Tailscale.
logger.Debugf("creating authkey for new Recorder")
tags := tsr.Spec.Tags
if len(tags) == 0 {
tags = tsapi.Tags{"tag:k8s"}
}
authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify())
if err != nil {
return err
}
logger.Debug("creating a new Secret for the Recorder")
if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil {
return err
}
return nil
}
func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error {
if !tsr.Spec.EnableUI && tsr.Spec.Storage.S3 == nil {
return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible")
}
return nil
}
// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName
// is expected to always be non-empty if the node ID is, but not required.
func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: r.tsNamespace,
Name: fmt.Sprintf("%s-0", tsrName),
},
}
if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
if apierrors.IsNotFound(err) {
return "", "", false, nil
}
return "", "", false, err
}
// TODO(tomhjp): Should maybe use ipn to parse the following info instead.
currentProfile, ok := secret.Data[currentProfileKey]
if !ok {
return "", "", false, nil
}
profileBytes, ok := secret.Data[string(currentProfile)]
if !ok {
return "", "", false, nil
}
var profile profile
if err := json.Unmarshal(profileBytes, &profile); err != nil {
return "", "", false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err)
}
ok = profile.Config.NodeID != ""
return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil
}
func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.TailnetDevice, ok bool, err error) {
nodeID, dnsName, ok, err := r.getNodeMetadata(ctx, tsrName)
if !ok || err != nil {
return tsapi.TailnetDevice{}, false, err
}
// TODO(tomhjp): The profile info doesn't include addresses, which is why we
// need the API. Should we instead update the profile to include addresses?
device, err := r.tsClient.Device(ctx, string(nodeID), nil)
if err != nil {
return tsapi.TailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err)
}
d = tsapi.TailnetDevice{
Hostname: device.Hostname,
TailnetIPs: device.Addresses,
}
if dnsName != "" {
d.URL = fmt.Sprintf("https://%s", dnsName)
}
return d, true, nil
}
type profile struct {
Config struct {
NodeID string `json:"NodeID"`
UserProfile struct {
LoginName string `json:"LoginName"`
} `json:"UserProfile"`
} `json:"Config"`
}
func markedForDeletion(tsr *tsapi.Recorder) bool {
return !tsr.DeletionTimestamp.IsZero()
}

@ -0,0 +1,278 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package main
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr"
"tailscale.com/version"
)
func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name,
Namespace: namespace,
Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels),
OwnerReferences: tsrOwnerReference(tsr),
Annotations: tsr.Spec.StatefulSet.Annotations,
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To[int32](1),
Selector: &metav1.LabelSelector{
MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name,
Namespace: namespace,
Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels),
Annotations: tsr.Spec.StatefulSet.Pod.Annotations,
},
Spec: corev1.PodSpec{
ServiceAccountName: tsr.Name,
Affinity: tsr.Spec.StatefulSet.Pod.Affinity,
SecurityContext: tsr.Spec.StatefulSet.Pod.SecurityContext,
ImagePullSecrets: tsr.Spec.StatefulSet.Pod.ImagePullSecrets,
NodeSelector: tsr.Spec.StatefulSet.Pod.NodeSelector,
Tolerations: tsr.Spec.StatefulSet.Pod.Tolerations,
Containers: []corev1.Container{
{
Name: "recorder",
Image: func() string {
image := tsr.Spec.StatefulSet.Pod.Container.Image
if image == "" {
image = fmt.Sprintf("tailscale/tsrecorder:%s", selfVersionImageTag())
}
return image
}(),
ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy,
Resources: tsr.Spec.StatefulSet.Pod.Container.Resources,
SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext,
Env: env(tsr),
EnvFrom: func() []corev1.EnvFromSource {
if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" {
return nil
}
return []corev1.EnvFromSource{{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: tsr.Spec.Storage.S3.Credentials.Secret.Name,
},
},
}}
}(),
Command: []string{"/tsrecorder"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "data",
MountPath: "/data",
ReadOnly: false,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "data",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
},
},
},
}
}
func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name,
Namespace: namespace,
Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr),
},
}
}
func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role {
return &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name,
Namespace: namespace,
Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr),
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{
"get",
"patch",
"update",
},
ResourceNames: []string{
tsr.Name, // Contains the auth key.
fmt.Sprintf("%s-0", tsr.Name), // Contains the node state.
},
},
},
}
}
func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: tsr.Name,
Namespace: namespace,
Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: tsr.Name,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: tsr.Name,
},
}
}
func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: tsr.Name,
Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr),
},
StringData: map[string]string{
"authkey": authKey,
},
}
}
func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-0", tsr.Name),
Namespace: namespace,
Labels: labels("recorder", tsr.Name, nil),
OwnerReferences: tsrOwnerReference(tsr),
},
}
}
func env(tsr *tsapi.Recorder) []corev1.EnvVar {
envs := []corev1.EnvVar{
{
Name: "TS_AUTHKEY",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: tsr.Name,
},
Key: "authkey",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
// Secret is named after the pod.
FieldPath: "metadata.name",
},
},
},
{
Name: "TS_STATE",
Value: "kube:$(POD_NAME)",
},
{
Name: "TSRECORDER_HOSTNAME",
Value: "$(POD_NAME)",
},
}
for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env {
envs = append(envs, corev1.EnvVar{
Name: string(env.Name),
Value: env.Value,
})
}
if tsr.Spec.Storage.S3 != nil {
envs = append(envs,
corev1.EnvVar{
Name: "TSRECORDER_DST",
Value: fmt.Sprintf("s3://%s", tsr.Spec.Storage.S3.Endpoint),
},
corev1.EnvVar{
Name: "TSRECORDER_BUCKET",
Value: tsr.Spec.Storage.S3.Bucket,
},
)
} else {
envs = append(envs, corev1.EnvVar{
Name: "TSRECORDER_DST",
Value: "/data/recordings",
})
}
if tsr.Spec.EnableUI {
envs = append(envs, corev1.EnvVar{
Name: "TSRECORDER_UI",
Value: "true",
})
}
return envs
}
func labels(app, instance string, customLabels map[string]string) map[string]string {
l := make(map[string]string, len(customLabels)+3)
for k, v := range customLabels {
l[k] = v
}
// ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
l["app.kubernetes.io/name"] = app
l["app.kubernetes.io/instance"] = instance
l["app.kubernetes.io/managed-by"] = "tailscale-operator"
return l
}
func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference {
return []metav1.OwnerReference{*metav1.NewControllerRef(owner, tsapi.SchemeGroupVersion.WithKind("Recorder"))}
}
// selfVersionImageTag returns the container image tag of the running operator
// build.
func selfVersionImageTag() string {
meta := version.GetMeta()
var versionPrefix string
if meta.UnstableBranch {
versionPrefix = "unstable-"
}
return fmt.Sprintf("%sv%s", versionPrefix, meta.MajorMinorPatch)
}

@ -0,0 +1,143 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package main
import (
"testing"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr"
)
func TestRecorderSpecs(t *testing.T) {
t.Run("ensure spec fields are passed through correctly", func(t *testing.T) {
tsr := &tsapi.Recorder{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: tsapi.RecorderSpec{
StatefulSet: tsapi.RecorderStatefulSet{
Labels: map[string]string{
"ss-label-key": "ss-label-value",
},
Annotations: map[string]string{
"ss-annotation-key": "ss-annotation-value",
},
Pod: tsapi.RecorderPod{
Labels: map[string]string{
"pod-label-key": "pod-label-value",
},
Annotations: map[string]string{
"pod-annotation-key": "pod-annotation-value",
},
Affinity: &corev1.Affinity{
PodAffinity: &corev1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"match-label": "match-value",
},
}},
},
},
},
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: ptr.To[int64](1000),
},
ImagePullSecrets: []corev1.LocalObjectReference{{
Name: "img-pull",
}},
NodeSelector: map[string]string{
"some-node": "selector",
},
Tolerations: []corev1.Toleration{{
Key: "key",
Value: "value",
TolerationSeconds: ptr.To[int64](60),
}},
Container: tsapi.RecorderContainer{
Env: []tsapi.Env{{
Name: "some_env",
Value: "env_value",
}},
Image: "custom-image",
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"NET_ADMIN",
},
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
},
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
},
},
},
},
},
},
}
ss := tsrStatefulSet(tsr, tsNamespace)
// StatefulSet-level.
if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Annotations, tsr.Spec.StatefulSet.Pod.Annotations); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
// Pod-level.
if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.SecurityContext, tsr.Spec.StatefulSet.Pod.SecurityContext); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.ImagePullSecrets, tsr.Spec.StatefulSet.Pod.ImagePullSecrets); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.NodeSelector, tsr.Spec.StatefulSet.Pod.NodeSelector); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Tolerations, tsr.Spec.StatefulSet.Pod.Tolerations); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
// Container-level.
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].ImagePullPolicy, tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].SecurityContext, tsr.Spec.StatefulSet.Pod.Container.SecurityContext); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" {
t.Errorf("(-got +want):\n%s", diff)
}
})
}

@ -0,0 +1,162 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package main
import (
"context"
"encoding/json"
"testing"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
tsoperator "tailscale.com/k8s-operator"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/tstest"
)
const tsNamespace = "tailscale"
func TestRecorder(t *testing.T) {
tsr := &tsapi.Recorder{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Finalizers: []string{"tailscale.com/finalizer"},
},
}
fc := fake.NewClientBuilder().
WithScheme(tsapi.GlobalScheme).
WithObjects(tsr).
WithStatusSubresource(tsr).
Build()
tsClient := &fakeTSClient{}
zl, _ := zap.NewDevelopment()
fr := record.NewFakeRecorder(1)
cl := tstest.NewClock(tstest.ClockOpts{})
reconciler := &RecorderReconciler{
tsNamespace: tsNamespace,
Client: fc,
tsClient: tsClient,
recorder: fr,
l: zl.Sugar(),
clock: cl,
}
t.Run("invalid spec gives an error condition", func(t *testing.T) {
expectReconciled(t, reconciler, "", tsr.Name)
msg := "Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible"
tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse, reasonRecorderInvalid, msg, 0, cl, zl.Sugar())
expectEqual(t, fc, tsr, nil)
if expected := 0; reconciler.recorders.Len() != expected {
t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
}
expectRecorderResources(t, fc, tsr, false)
expectedEvent := "Warning RecorderInvalid Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible"
expectEvents(t, fr, []string{expectedEvent})
})
t.Run("observe Ready=true status condition for a valid spec", func(t *testing.T) {
tsr.Spec.EnableUI = true
mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) {
t.Spec = tsr.Spec
})
expectReconciled(t, reconciler, "", tsr.Name)
tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated, 0, cl, zl.Sugar())
expectEqual(t, fc, tsr, nil)
if expected := 1; reconciler.recorders.Len() != expected {
t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
}
expectRecorderResources(t, fc, tsr, true)
})
t.Run("populate node info in state secret, and see it appear in status", func(t *testing.T) {
bytes, err := json.Marshal(map[string]any{
"Config": map[string]any{
"NodeID": "nodeid-123",
"UserProfile": map[string]any{
"LoginName": "test-0.example.ts.net",
},
},
})
if err != nil {
t.Fatal(err)
}
const key = "profile-abc"
mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) {
s.Data = map[string][]byte{
currentProfileKey: []byte(key),
key: bytes,
}
})
expectReconciled(t, reconciler, "", tsr.Name)
tsr.Status.Devices = []tsapi.TailnetDevice{
{
Hostname: "test-device",
TailnetIPs: []string{"1.2.3.4", "::1"},
URL: "https://test-0.example.ts.net",
},
}
expectEqual(t, fc, tsr, nil)
})
t.Run("delete the Recorder and observe cleanup", func(t *testing.T) {
if err := fc.Delete(context.Background(), tsr); err != nil {
t.Fatal(err)
}
expectReconciled(t, reconciler, "", tsr.Name)
expectMissing[tsapi.Recorder](t, fc, "", tsr.Name)
if expected := 0; reconciler.recorders.Len() != expected {
t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len())
}
if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" {
t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff)
}
// The fake client does not clean up objects whose owner has been
// deleted, so we can't test for the owned resources getting deleted.
})
}
func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) {
t.Helper()
auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey")
state := tsrStateSecret(tsr, tsNamespace)
role := tsrRole(tsr, tsNamespace)
roleBinding := tsrRoleBinding(tsr, tsNamespace)
serviceAccount := tsrServiceAccount(tsr, tsNamespace)
statefulSet := tsrStatefulSet(tsr, tsNamespace)
if shouldExist {
expectEqual(t, fc, auth, nil)
expectEqual(t, fc, state, nil)
expectEqual(t, fc, role, nil)
expectEqual(t, fc, roleBinding, nil)
expectEqual(t, fc, serviceAccount, nil)
expectEqual(t, fc, statefulSet, nil)
} else {
expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name)
expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name)
expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name)
expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name)
expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name)
expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name)
}
}

@ -14,6 +14,8 @@
- [DNSConfigList](#dnsconfiglist)
- [ProxyClass](#proxyclass)
- [ProxyClassList](#proxyclasslist)
- [Recorder](#recorder)
- [RecorderList](#recorderlist)
@ -236,6 +238,7 @@ _Appears in:_
_Appears in:_
- [Container](#container)
- [RecorderContainer](#recordercontainer)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
@ -258,23 +261,6 @@ _Appears in:_
#### Image
_Appears in:_
- [Nameserver](#nameserver)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | |
| `tag` _string_ | Tag defaults to operator's own tag. | | |
#### Metrics
@ -319,7 +305,24 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | |
#### NameserverImage
_Appears in:_
- [Nameserver](#nameserver)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | |
| `tag` _string_ | Tag defaults to unstable. | | |
#### NameserverStatus
@ -447,6 +450,145 @@ _Appears in:_
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyClass.<br />Known condition types are `ProxyClassReady`. | | |
#### Recorder
_Appears in:_
- [RecorderList](#recorderlist)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
| `kind` _string_ | `Recorder` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.<br />Servers may infer this from the endpoint the client submits requests to.<br />Cannot be updated.<br />In CamelCase.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.<br />Servers should convert recognized schemas to the latest internal value, and<br />may reject unrecognized values.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[RecorderSpec](#recorderspec)_ | Spec describes the desired recorder instance. | | |
| `status` _[RecorderStatus](#recorderstatus)_ | RecorderStatus describes the status of the recorder. This is set<br />and managed by the Tailscale operator. | | |
#### RecorderContainer
_Appears in:_
- [RecorderPod](#recorderpod)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `env` _[Env](#env) array_ | List of environment variables to set in the container.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables<br />Note that environment variables provided here will take precedence<br />over Tailscale-specific environment variables set by the operator,<br />however running proxies with custom values for Tailscale environment<br />variables (i.e TS_USERSPACE) is not recommended and might break in<br />the future. | | |
| `image` _string_ | Container image name including tag. Defaults to docker.io/tailscale/tsrecorder<br />with the same tag as the operator, but the official images are also<br />available at ghcr.io/tailscale/tsrecorder.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | |
| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent] <br /> |
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.<br />By default, the operator does not apply any resource requirements. The<br />amount of resources required will depend on the volume of recordings sent.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context. By default, the operator does not apply any<br />container security context.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | |
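
For illustration, these container fields sit under `spec.statefulSet.pod.container` in a Recorder manifest; a sketch with placeholder values:

```yaml
apiVersion: tailscale.com/v1alpha1
kind: Recorder
metadata:
  name: rec
spec:
  enableUI: true
  statefulSet:
    pod:
      container:
        image: ghcr.io/tailscale/tsrecorder:v1.72.0   # placeholder tag
        imagePullPolicy: IfNotPresent
        env:
          - name: SOME_ENV                            # placeholder extra env var
            value: some-value
        resources:
          requests:
            cpu: 50m
          limits:
            cpu: 100m
```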
#### RecorderList
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
| `kind` _string_ | `RecorderList` | | |
| `kind` _string_ | Kind is a string value representing the REST resource this object represents.<br />Servers may infer this from the endpoint the client submits requests to.<br />Cannot be updated.<br />In CamelCase.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.<br />Servers should convert recognized schemas to the latest internal value, and<br />may reject unrecognized values.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[Recorder](#recorder) array_ | | | |
#### RecorderPod
_Appears in:_
- [RecorderStatefulSet](#recorderstatefulset)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `labels` _object (keys:string, values:string)_ | Labels that will be added to Recorder Pods. Any labels specified here<br />will be merged with the default labels applied to the Pod by the operator.<br />https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | |
| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to Recorder Pods. Any annotations<br />specified here will be merged with the default annotations applied to<br />the Pod by the operator.<br />https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | |
| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Affinity rules for Recorder Pods. By default, the operator does not<br />apply any affinity rules.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | |
| `container` _[RecorderContainer](#recordercontainer)_ | Configuration for the Recorder container running tailscale. | | |
| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Security context for Recorder Pods. By default, the operator does not<br />apply any Pod security context.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | |
| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Image pull Secrets for Recorder Pods.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | |
| `nodeSelector` _object (keys:string, values:string)_ | Node selector rules for Recorder Pods. By default, the operator does<br />not apply any node selector rules.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Tolerations for Recorder Pods. By default, the operator does not apply<br />any tolerations.<br />https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
#### RecorderSpec
_Appears in:_
- [Recorder](#recorder)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `statefulSet` _[RecorderStatefulSet](#recorderstatefulset)_ | Configuration parameters for the Recorder's StatefulSet. The operator<br />deploys a StatefulSet for each Recorder resource. | | |
| `tags` _[Tags](#tags)_ | Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].<br />If you specify custom tags here, make sure you also make the operator<br />an owner of these tags.<br />See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.<br />Tags cannot be changed once a Recorder node has been created.<br />Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` <br />Type: string <br /> |
| `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.<br />The UI will be served at <MagicDNS name of the recorder>:443. Defaults to false.<br />Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.<br />Required if S3 storage is not set up, to ensure that recordings are accessible. | | |
| `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will<br />be stored in a local ephemeral volume, and will not be persisted past the<br />lifetime of a specific pod. | | |
#### RecorderStatefulSet
_Appears in:_
- [RecorderSpec](#recorderspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `labels` _object (keys:string, values:string)_ | Labels that will be added to the StatefulSet created for the Recorder.<br />Any labels specified here will be merged with the default labels applied<br />to the StatefulSet by the operator.<br />https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | |
| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the StatefulSet created for the Recorder.<br />Any Annotations specified here will be merged with the default annotations<br />applied to the StatefulSet by the operator.<br />https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | |
| `pod` _[RecorderPod](#recorderpod)_ | Configuration for pods created by the Recorder's StatefulSet. | | |
#### RecorderStatus
_Appears in:_
- [Recorder](#recorder)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Recorder.<br />Known condition types are `RecorderReady`. | | |
| `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the Recorder statefulset. | | |
#### Route
_Underlying type:_ _string_
@ -478,6 +620,56 @@ _Appears in:_
#### S3
_Appears in:_
- [Storage](#storage)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `endpoint` _string_ | S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com. | | |
| `bucket` _string_ | Bucket name to write to. The bucket is expected to be used solely for<br />recordings, as there is no stable prefix for written object names. | | |
| `credentials` _[S3Credentials](#s3credentials)_ | Configure environment variable credentials for managing objects in the<br />configured bucket. If not set, tsrecorder will try to acquire credentials<br />first from the file system and then the STS API. | | |
#### S3Credentials
_Appears in:_
- [S3](#s3)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secret` _[S3Secret](#s3secret)_ | Use a Kubernetes Secret from the operator's namespace as the source of<br />credentials. | | |
#### S3Secret
_Appears in:_
- [S3Credentials](#s3credentials)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `name` _string_ | The name of a Kubernetes Secret in the operator's namespace that contains<br />credentials for writing to the configured bucket. Each key-value pair<br />from the secret's data will be mounted as an environment variable. It<br />should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if<br />using a static access key. | | |
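
A minimal sketch of the referenced credentials Secret, assuming the operator runs in the `tailscale` namespace and a static access key is used (name and values are placeholders):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: recorder-s3-creds       # must match spec.storage.s3.credentials.secret.name
  namespace: tailscale          # the operator's namespace
stringData:
  AWS_ACCESS_KEY_ID: "<access-key-id>"
  AWS_SECRET_ACCESS_KEY: "<secret-access-key>"
```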
#### StatefulSet
@ -496,6 +688,22 @@ _Appears in:_
| `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | |
#### Storage
_Appears in:_
- [RecorderSpec](#recorderspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `s3` _[S3](#s3)_ | Configure an S3-compatible API for storage. Required if the UI is not<br />enabled, to ensure that recordings are accessible. | | |
#### SubnetRouter
@ -540,8 +748,27 @@ _Validation:_
_Appears in:_
- [ConnectorSpec](#connectorspec)
- [RecorderSpec](#recorderspec)
#### TailnetDevice
_Appears in:_
- [RecorderStatus](#recorderstatus)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `hostname` _string_ | Hostname is the fully qualified domain name of the device.<br />If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the<br />node. | | |
| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)<br />assigned to the device. | | |
| `url` _string_ | URL where the UI is available if enabled for replaying recordings. This<br />will be an HTTPS MagicDNS URL. You must be connected to the same tailnet<br />as the recorder to access it. | | |
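
For illustration, a populated Recorder status might look roughly like this (values mirror the fake data used in the controller tests; some condition fields are omitted):

```yaml
status:
  conditions:
    - type: RecorderReady
      status: "True"
      reason: RecorderCreated
  devices:
    - hostname: test-device
      tailnetIPs:
        - 1.2.3.4
        - "::1"
      url: https://test-0.example.ts.net
```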
#### TailscaleConfig #### TailscaleConfig

@ -49,7 +49,16 @@ func init() {
// Adds the list of known types to api.Scheme. // Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error { func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{}) scheme.AddKnownTypes(SchemeGroupVersion,
&Connector{},
&ConnectorList{},
&ProxyClass{},
&ProxyClassList{},
&DNSConfig{},
&DNSConfigList{},
&Recorder{},
&RecorderList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion) metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil return nil
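Registering Recorder and RecorderList in addKnownTypes is what lets a typed client decode these objects. The sketch below shows one way that might look from a consumer's side; it assumes the package exports the usual SchemeBuilder-generated `AddToScheme` helper and the import path shown, neither of which is confirmed by this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	tsapi "tailscale.com/k8s-operator/apis/v1alpha1" // assumed import path
)

func main() {
	// Build a scheme that knows about the tailscale.com/v1alpha1 types,
	// including the newly registered Recorder/RecorderList.
	scheme := runtime.NewScheme()
	if err := tsapi.AddToScheme(scheme); err != nil { // assumed SchemeBuilder helper
		log.Fatal(err)
	}

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		log.Fatal(err)
	}

	// List all Recorder resources in the cluster (the CRD is cluster-scoped).
	var recs tsapi.RecorderList
	if err := c.List(context.Background(), &recs); err != nil {
		log.Fatal(err)
	}
	for _, r := range recs.Items {
		fmt.Println(r.Name)
	}
}
```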

@ -173,4 +173,5 @@ const (
ConnectorReady ConditionType = `ConnectorReady` ConnectorReady ConditionType = `ConnectorReady`
ProxyClassready ConditionType = `ProxyClassReady` ProxyClassready ConditionType = `ProxyClassReady`
ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service
RecorderReady ConditionType = `RecorderReady`
) )

@ -0,0 +1,249 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster,shortName=rec
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "RecorderReady")].reason`,description="Status of the deployed Recorder resources."
// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.devices[?(@.url != "")].url`,description="URL on which the UI is exposed if enabled."
type Recorder struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec describes the desired recorder instance.
Spec RecorderSpec `json:"spec"`
// RecorderStatus describes the status of the recorder. This is set
// and managed by the Tailscale operator.
// +optional
Status RecorderStatus `json:"status"`
}
// +kubebuilder:object:root=true
type RecorderList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Recorder `json:"items"`
}
type RecorderSpec struct {
// Configuration parameters for the Recorder's StatefulSet. The operator
// deploys a StatefulSet for each Recorder resource.
// +optional
StatefulSet RecorderStatefulSet `json:"statefulSet"`
// Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].
// If you specify custom tags here, make sure you also make the operator
// an owner of these tags.
// See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
// Tags cannot be changed once a Recorder node has been created.
// Tag values must be in the form ^tag:[a-zA-Z][a-zA-Z0-9-]*$.
// +optional
Tags Tags `json:"tags,omitempty"`
// TODO(tomhjp): Support a hostname or hostname prefix field, depending on
// the plan for multiple replicas.
// Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
// The UI will be served at <MagicDNS name of the recorder>:443. Defaults to false.
// Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
// Required if S3 storage is not set up, to ensure that recordings are accessible.
// +optional
EnableUI bool `json:"enableUI,omitempty"`
// Configure where to store session recordings. By default, recordings will
// be stored in a local ephemeral volume, and will not be persisted past the
// lifetime of a specific pod.
// +optional
Storage Storage `json:"storage,omitempty"`
}
type RecorderStatefulSet struct {
// Labels that will be added to the StatefulSet created for the Recorder.
// Any labels specified here will be merged with the default labels applied
// to the StatefulSet by the operator.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations that will be added to the StatefulSet created for the Recorder.
// Any Annotations specified here will be merged with the default annotations
// applied to the StatefulSet by the operator.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Configuration for pods created by the Recorder's StatefulSet.
// +optional
Pod RecorderPod `json:"pod,omitempty"`
}
type RecorderPod struct {
// Labels that will be added to Recorder Pods. Any labels specified here
// will be merged with the default labels applied to the Pod by the operator.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations that will be added to Recorder Pods. Any annotations
// specified here will be merged with the default annotations applied to
// the Pod by the operator.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Affinity rules for Recorder Pods. By default, the operator does not
// apply any affinity rules.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity
// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// Configuration for the Recorder container running tailscale.
// +optional
Container RecorderContainer `json:"container,omitempty"`
// Security context for Recorder Pods. By default, the operator does not
// apply any Pod security context.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2
// +optional
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
// Image pull Secrets for Recorder Pods.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
// +optional
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Node selector rules for Recorder Pods. By default, the operator does
// not apply any node selector rules.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Tolerations for Recorder Pods. By default, the operator does not apply
// any tolerations.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
type RecorderContainer struct {
// List of environment variables to set in the container.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
// Note that environment variables provided here will take precedence
// over Tailscale-specific environment variables set by the operator.
// However, running recorders with custom values for Tailscale environment
// variables (e.g. TS_USERSPACE) is not recommended and might break in
// the future.
// +optional
Env []Env `json:"env,omitempty"`
// Container image name including tag. Defaults to docker.io/tailscale/tsrecorder
// with the same tag as the operator, but the official images are also
// available at ghcr.io/tailscale/tsrecorder.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
// +optional
Image string `json:"image,omitempty"`
// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
// +kubebuilder:validation:Enum=Always;Never;IfNotPresent
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Container resource requirements.
// By default, the operator does not apply any resource requirements. The
// amount of resources required will depend on the volume of recordings sent.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Container security context. By default, the operator does not apply any
// container security context.
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
}
type Storage struct {
// Configure an S3-compatible API for storage. Required if the UI is not
// enabled, to ensure that recordings are accessible.
// +optional
S3 *S3 `json:"s3,omitempty"`
}
type S3 struct {
// S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com.
Endpoint string `json:"endpoint,omitempty"`
// Bucket name to write to. The bucket is expected to be used solely for
// recordings, as there is no stable prefix for written object names.
Bucket string `json:"bucket,omitempty"`
// Configure environment variable credentials for managing objects in the
// configured bucket. If not set, tsrecorder will try to acquire credentials
// first from the file system and then the STS API.
// +optional
Credentials S3Credentials `json:"credentials,omitempty"`
}
type S3Credentials struct {
// Use a Kubernetes Secret from the operator's namespace as the source of
// credentials.
// +optional
Secret S3Secret `json:"secret,omitempty"`
}
type S3Secret struct {
// The name of a Kubernetes Secret in the operator's namespace that contains
// credentials for writing to the configured bucket. Each key-value pair
// from the secret's data will be mounted as an environment variable. It
// should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if
// using a static access key.
// +optional
Name string `json:"name,omitempty"`
}
type RecorderStatus struct {
// List of status conditions to indicate the status of the Recorder.
// Known condition types are `RecorderReady`.
// +listType=map
// +listMapKey=type
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// List of tailnet devices associated with the Recorder statefulset.
// +listType=map
// +listMapKey=hostname
// +optional
Devices []TailnetDevice `json:"devices,omitempty"`
}
type TailnetDevice struct {
// Hostname is the fully qualified domain name of the device.
// If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
// node.
Hostname string `json:"hostname"`
// TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
// assigned to the device.
// +optional
TailnetIPs []string `json:"tailnetIPs,omitempty"`
// URL where the UI is available if enabled for replaying recordings. This
// will be an HTTPS MagicDNS URL. You must be connected to the same tailnet
// as the recorder to access it.
// +optional
URL string `json:"url,omitempty"`
}
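To make the nesting of the StatefulSet, Pod, and Container overrides above concrete, here is a sketch of a complete Recorder object equivalent to a small CR. Field values (tag, node selector, image) are illustrative only, and the `tsapi` import path is assumed as before.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	tsapi "tailscale.com/k8s-operator/apis/v1alpha1" // assumed import path for the types above
)

func main() {
	// A Recorder equivalent to a small CR: UI enabled, a custom tag, and a few
	// pod-level overrides showing how StatefulSet -> Pod -> Container nest.
	rec := tsapi.Recorder{
		ObjectMeta: metav1.ObjectMeta{Name: "rec"},
		Spec: tsapi.RecorderSpec{
			EnableUI: true,
			// The operator must be an owner of any custom tags; illustrative value.
			Tags: tsapi.Tags{"tag:recorder"},
			StatefulSet: tsapi.RecorderStatefulSet{
				Pod: tsapi.RecorderPod{
					NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
					Container: tsapi.RecorderContainer{
						// Pin a specific image rather than the operator-matched default.
						Image:           "ghcr.io/tailscale/tsrecorder:unstable", // illustrative
						ImagePullPolicy: corev1.PullIfNotPresent,
					},
				},
			},
		},
	}
	fmt.Printf("%+v\n", rec)
}
```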

@ -78,16 +78,16 @@ type DNSConfigSpec struct {
} }
type Nameserver struct { type Nameserver struct {
// Nameserver image. // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable.
// +optional // +optional
Image *Image `json:"image,omitempty"` Image *NameserverImage `json:"image,omitempty"`
} }
type Image struct { type NameserverImage struct {
// Repo defaults to tailscale/k8s-nameserver. // Repo defaults to tailscale/k8s-nameserver.
// +optional // +optional
Repo string `json:"repo,omitempty"` Repo string `json:"repo,omitempty"`
// Tag defaults to operator's own tag. // Tag defaults to unstable.
// +optional // +optional
Tag string `json:"tag,omitempty"` Tag string `json:"tag,omitempty"`
} }

@ -271,21 +271,6 @@ func (in *Env) DeepCopy() *Env {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
if in == nil {
return nil
}
out := new(Image)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metrics) DeepCopyInto(out *Metrics) { func (in *Metrics) DeepCopyInto(out *Metrics) {
*out = *in *out = *in
@ -306,7 +291,7 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) {
*out = *in *out = *in
if in.Image != nil { if in.Image != nil {
in, out := &in.Image, &out.Image in, out := &in.Image, &out.Image
*out = new(Image) *out = new(NameserverImage)
**out = **in **out = **in
} }
} }
@ -321,6 +306,21 @@ func (in *Nameserver) DeepCopy() *Nameserver {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NameserverImage) DeepCopyInto(out *NameserverImage) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverImage.
func (in *NameserverImage) DeepCopy() *NameserverImage {
if in == nil {
return nil
}
out := new(NameserverImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) {
*out = *in *out = *in
@ -515,6 +515,231 @@ func (in *ProxyClassStatus) DeepCopy() *ProxyClassStatus {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Recorder) DeepCopyInto(out *Recorder) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Recorder.
func (in *Recorder) DeepCopy() *Recorder {
if in == nil {
return nil
}
out := new(Recorder)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Recorder) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderContainer) DeepCopyInto(out *RecorderContainer) {
*out = *in
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]Env, len(*in))
copy(*out, *in)
}
in.Resources.DeepCopyInto(&out.Resources)
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderContainer.
func (in *RecorderContainer) DeepCopy() *RecorderContainer {
if in == nil {
return nil
}
out := new(RecorderContainer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderList) DeepCopyInto(out *RecorderList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Recorder, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderList.
func (in *RecorderList) DeepCopy() *RecorderList {
if in == nil {
return nil
}
out := new(RecorderList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RecorderList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderPod) DeepCopyInto(out *RecorderPod) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.Affinity)
(*in).DeepCopyInto(*out)
}
in.Container.DeepCopyInto(&out.Container)
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]corev1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderPod.
func (in *RecorderPod) DeepCopy() *RecorderPod {
if in == nil {
return nil
}
out := new(RecorderPod)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderSpec) DeepCopyInto(out *RecorderSpec) {
*out = *in
in.StatefulSet.DeepCopyInto(&out.StatefulSet)
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make(Tags, len(*in))
copy(*out, *in)
}
in.Storage.DeepCopyInto(&out.Storage)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderSpec.
func (in *RecorderSpec) DeepCopy() *RecorderSpec {
if in == nil {
return nil
}
out := new(RecorderSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderStatefulSet) DeepCopyInto(out *RecorderStatefulSet) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
in.Pod.DeepCopyInto(&out.Pod)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderStatefulSet.
func (in *RecorderStatefulSet) DeepCopy() *RecorderStatefulSet {
if in == nil {
return nil
}
out := new(RecorderStatefulSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RecorderStatus) DeepCopyInto(out *RecorderStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]TailnetDevice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderStatus.
func (in *RecorderStatus) DeepCopy() *RecorderStatus {
if in == nil {
return nil
}
out := new(RecorderStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Routes) DeepCopyInto(out *Routes) { func (in Routes) DeepCopyInto(out *Routes) {
{ {
@ -534,6 +759,53 @@ func (in Routes) DeepCopy() Routes {
return *out return *out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3) DeepCopyInto(out *S3) {
*out = *in
out.Credentials = in.Credentials
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3.
func (in *S3) DeepCopy() *S3 {
if in == nil {
return nil
}
out := new(S3)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3Credentials) DeepCopyInto(out *S3Credentials) {
*out = *in
out.Secret = in.Secret
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Credentials.
func (in *S3Credentials) DeepCopy() *S3Credentials {
if in == nil {
return nil
}
out := new(S3Credentials)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3Secret) DeepCopyInto(out *S3Secret) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Secret.
func (in *S3Secret) DeepCopy() *S3Secret {
if in == nil {
return nil
}
out := new(S3Secret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in *out = *in
@ -568,6 +840,26 @@ func (in *StatefulSet) DeepCopy() *StatefulSet {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Storage) DeepCopyInto(out *Storage) {
*out = *in
if in.S3 != nil {
in, out := &in.S3, &out.S3
*out = new(S3)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage.
func (in *Storage) DeepCopy() *Storage {
if in == nil {
return nil
}
out := new(Storage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetRouter) DeepCopyInto(out *SubnetRouter) { func (in *SubnetRouter) DeepCopyInto(out *SubnetRouter) {
*out = *in *out = *in
@ -607,6 +899,26 @@ func (in Tags) DeepCopy() Tags {
return *out return *out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) {
*out = *in
if in.TailnetIPs != nil {
in, out := &in.TailnetIPs, &out.TailnetIPs
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetDevice.
func (in *TailnetDevice) DeepCopy() *TailnetDevice {
if in == nil {
return nil
}
out := new(TailnetDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TailscaleConfig) DeepCopyInto(out *TailscaleConfig) { func (in *TailscaleConfig) DeepCopyInto(out *TailscaleConfig) {
*out = *in *out = *in

@ -63,6 +63,14 @@ func RemoveServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionTy
}) })
} }
// SetRecorderCondition ensures that Recorder status has a condition with the
// given attributes. LastTransitionTime gets set every time the condition's
// status changes.
func SetRecorderCondition(tsr *tsapi.Recorder, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) {
conds := updateCondition(tsr.Status.Conditions, conditionType, status, reason, message, gen, clock, logger)
tsr.Status.Conditions = conds
}
func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition {
newCondition := metav1.Condition{ newCondition := metav1.Condition{
Type: string(conditionType), Type: string(conditionType),
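For orientation, a rough sketch of how a reconcile loop might use the new helper to flip the `RecorderReady` condition. The `tsoperator` import alias, the reason strings, and the surrounding reconciler fields are assumptions for illustration, not the controller's actual code.

```go
package main

import (
	"go.uber.org/zap"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	tsoperator "tailscale.com/k8s-operator"          // assumed import path for SetRecorderCondition
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1" // assumed import path for the Recorder types
	"tailscale.com/tstime"
)

// exampleReconciler is a stand-in for the operator's real Recorder reconciler.
type exampleReconciler struct {
	clock  tstime.Clock
	logger *zap.SugaredLogger
}

// markReady records the outcome of a reconcile pass on the Recorder's
// RecorderReady condition. Reason and message strings are illustrative.
func (r *exampleReconciler) markReady(tsr *tsapi.Recorder, err error) {
	if err != nil {
		tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse,
			"RecorderCreationFailed", err.Error(), tsr.Generation, r.clock, r.logger)
		return
	}
	tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionTrue,
		"RecorderCreated", "recorder resources provisioned", tsr.Generation, r.clock, r.logger)
}

func main() {}
```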

@ -20,4 +20,5 @@ const (
MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources" MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"
MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources" MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources"
MetricNameserverCount = "k8s_nameserver_resources" MetricNameserverCount = "k8s_nameserver_resources"
MetricRecorderCount = "k8s_recorder_resources"
) )
