mirror of https://github.com/tailscale/tailscale/
all-kube: create Tailscale Service for HA kube-apiserver ProxyGroup (#16572)
Adds a new reconciler for ProxyGroups of type kube-apiserver that provisions a Tailscale Service for each replica to advertise. Adds two new condition types to the ProxyGroup, TailscaleServiceValid and TailscaleServiceConfigured, to post updates on the state of that reconciler in a way that's consistent with the service-pg reconciler. The created Tailscale Service name is configurable via a new ProxyGroup field, spec.kubeAPIServer.serviceName, which expects a string of the form "svc:<dns-label>".

Lots of supporting changes were needed to implement this in a way that's consistent with other operator workflows, including:

* Pulled containerboot's ensureServicesUnadvertised and certManager into kube/ libraries to be shared with k8s-proxy, and used them in k8s-proxy to aid Service cert sharing between replicas and graceful Service shutdown.
* For certManager, added an initial wait to the cert loop until the domain appears in the device's netmap, to avoid a guaranteed error on the first issue attempt when the proxy starts quickly.
* Made several methods in ingress-for-pg.go and svc-for-pg.go into functions so they can be shared with the new reconciler.
* Added a Resource struct to the owner refs stored in Tailscale Service annotations, to distinguish between Ingress- and ProxyGroup-based Services that need cleaning up in the Tailscale API.
* Added a ListVIPServices method to the internal tailscale client to aid cleaning up orphaned Services.
* Added support for reading config from a kube Secret, and partial support for config reloading, to avoid forcing Pod restarts when config changes.
* Fixed up the zap logger so it's possible to set the debug log level.

Updates #13358

Change-Id: Ia9607441157dd91fb9b6ecbc318eecbef446e116
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
parent
5adde9e3f3
commit
f421907c38
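
For reference, a minimal sketch of the naming scheme the new reconciler implements (see serviceNameForAPIServerProxy and maybeProvision in the first file below): the advertised Tailscale Service name is "svc:" plus the configured hostname, falling back to the ProxyGroup name, and the proxy is then reachable at that label under the tailnet's MagicDNS suffix. The ProxyGroup name "kube-api" and domain "example.ts.net" here are illustrative assumptions, not values from this change.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical inputs: a ProxyGroup named "kube-api" with no
	// spec.kubeAPIServer hostname override, on the "example.ts.net" tailnet.
	pgName := "kube-api"
	hostname := "" // would come from spec.kubeAPIServer if set
	tailnetDomain := "example.ts.net"

	label := hostname
	if label == "" {
		label = pgName
	}
	// Tailscale Service names carry a "svc:" prefix, i.e. "svc:<dns-label>".
	serviceName := "svc:" + label
	// The MagicDNS name joins the label with the tailnet cert domain,
	// mirroring dnsName = serviceName.WithoutPrefix() + "." + tcd below.
	dnsName := strings.TrimPrefix(serviceName, "svc:") + "." + tailnetDomain

	fmt.Println(serviceName)          // svc:kube-api
	fmt.Println("https://" + dnsName) // https://kube-api.example.ts.net
}
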
@ -0,0 +1,479 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
//go:build !plan9
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"maps"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"go.uber.org/zap"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
|
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||||
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
"tailscale.com/internal/client/tailscale"
|
||||||
|
tsoperator "tailscale.com/k8s-operator"
|
||||||
|
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||||
|
"tailscale.com/kube/k8s-proxy/conf"
|
||||||
|
"tailscale.com/kube/kubetypes"
|
||||||
|
"tailscale.com/tailcfg"
|
||||||
|
"tailscale.com/tstime"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
proxyPGFinalizerName = "tailscale.com/kube-apiserver-finalizer"
|
||||||
|
|
||||||
|
// Reasons for KubeAPIServerProxyValid condition.
|
||||||
|
reasonKubeAPIServerProxyInvalid = "KubeAPIServerProxyInvalid"
|
||||||
|
reasonKubeAPIServerProxyValid = "KubeAPIServerProxyValid"
|
||||||
|
|
||||||
|
// Reasons for KubeAPIServerProxyConfigured condition.
|
||||||
|
reasonKubeAPIServerProxyConfigured = "KubeAPIServerProxyConfigured"
|
||||||
|
reasonKubeAPIServerProxyNoBackends = "KubeAPIServerProxyNoBackends"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KubeAPIServerTSServiceReconciler reconciles the Tailscale Services required for an
|
||||||
|
// HA deployment of the API Server Proxy.
|
||||||
|
type KubeAPIServerTSServiceReconciler struct {
|
||||||
|
client.Client
|
||||||
|
recorder record.EventRecorder
|
||||||
|
logger *zap.SugaredLogger
|
||||||
|
tsClient tsClient
|
||||||
|
tsNamespace string
|
||||||
|
lc localClient
|
||||||
|
defaultTags []string
|
||||||
|
operatorID string // stableID of the operator's Tailscale device
|
||||||
|
|
||||||
|
clock tstime.Clock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reconcile is the entry point for the controller.
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
|
||||||
|
logger := r.logger.With("ProxyGroup", req.Name)
|
||||||
|
logger.Debugf("starting reconcile")
|
||||||
|
defer logger.Debugf("reconcile finished")
|
||||||
|
|
||||||
|
pg := new(tsapi.ProxyGroup)
|
||||||
|
err = r.Get(ctx, req.NamespacedName, pg)
|
||||||
|
if apierrors.IsNotFound(err) {
|
||||||
|
// Request object not found, could have been deleted after reconcile request.
|
||||||
|
logger.Debugf("ProxyGroup not found, assuming it was deleted")
|
||||||
|
return res, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return res, fmt.Errorf("failed to get ProxyGroup: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceName := serviceNameForAPIServerProxy(pg)
|
||||||
|
logger = logger.With("Tailscale Service", serviceName)
|
||||||
|
|
||||||
|
if markedForDeletion(pg) {
|
||||||
|
logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up")
|
||||||
|
if err = r.maybeCleanup(ctx, serviceName, pg, logger); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) {
|
||||||
|
logger.Infof("optimistic lock error, retrying: %s", err)
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = r.maybeProvision(ctx, serviceName, pg, logger)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), optimisticLockErrorMsg) {
|
||||||
|
logger.Infof("optimistic lock error, retrying: %s", err)
|
||||||
|
return reconcile.Result{}, nil
|
||||||
|
}
|
||||||
|
return reconcile.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return reconcile.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// maybeProvision ensures that a Tailscale Service for this ProxyGroup exists
|
||||||
|
// and is up to date.
|
||||||
|
//
|
||||||
|
// It also updates the ProxyGroup's status to reflect the Tailscale Service's state.
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) {
|
||||||
|
var dnsName string
|
||||||
|
oldPGStatus := pg.Status.DeepCopy()
|
||||||
|
defer func() {
|
||||||
|
podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName)
|
||||||
|
if podsErr != nil {
|
||||||
|
err = errors.Join(err, fmt.Errorf("failed to get number of advertised Pods: %w", podsErr))
|
||||||
|
// Continue, updating the status with the best available information.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the ProxyGroup status with the Tailscale Service information
|
||||||
|
// Update the condition based on how many pods are advertising the service
|
||||||
|
conditionStatus := metav1.ConditionFalse
|
||||||
|
conditionReason := reasonKubeAPIServerProxyNoBackends
|
||||||
|
conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", podsAdvertising, pgReplicas(pg))
|
||||||
|
|
||||||
|
pg.Status.URL = ""
|
||||||
|
if podsAdvertising > 0 {
|
||||||
|
// At least one pod is advertising the service, consider it configured
|
||||||
|
conditionStatus = metav1.ConditionTrue
|
||||||
|
conditionReason = reasonKubeAPIServerProxyConfigured
|
||||||
|
if dnsName != "" {
|
||||||
|
pg.Status.URL = "https://" + dnsName
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, conditionStatus, conditionReason, conditionMessage, pg.Generation, r.clock, logger)
|
||||||
|
|
||||||
|
if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) {
|
||||||
|
// An error encountered here should get returned by the Reconcile function.
|
||||||
|
err = errors.Join(err, r.Client.Status().Update(ctx, pg))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if !tsoperator.ProxyGroupAvailable(pg) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !slices.Contains(pg.Finalizers, proxyPGFinalizerName) {
|
||||||
|
// This log line is printed exactly once during initial provisioning,
|
||||||
|
// because once the finalizer is in place this block gets skipped. So,
|
||||||
|
// this is a nice place to tell the operator that the high level,
|
||||||
|
// multi-reconcile operation is underway.
|
||||||
|
logger.Info("provisioning Tailscale Service for ProxyGroup")
|
||||||
|
pg.Finalizers = append(pg.Finalizers, proxyPGFinalizerName)
|
||||||
|
if err := r.Update(ctx, pg); err != nil {
|
||||||
|
return fmt.Errorf("failed to add finalizer: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Check there isn't a Tailscale Service with the same hostname
|
||||||
|
// already created and not owned by this ProxyGroup.
|
||||||
|
existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName)
|
||||||
|
if isErrorFeatureFlagNotEnabled(err) {
|
||||||
|
logger.Warn(msgFeatureFlagNotEnabled)
|
||||||
|
r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err != nil && !isErrorTailscaleServiceNotFound(err) {
|
||||||
|
return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
updatedAnnotations, err := exclusiveOwnerAnnotations(pg, r.operatorID, existingTSSvc)
|
||||||
|
if err != nil {
|
||||||
|
const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different Service name in the ProxyGroup's spec.kubeAPIServer.serviceName field"
|
||||||
|
msg := fmt.Sprintf("error ensuring exclusive ownership of Tailscale Service %s: %v. %s", serviceName, err, instr)
|
||||||
|
logger.Warn(msg)
|
||||||
|
r.recorder.Event(pg, corev1.EventTypeWarning, "InvalidTailscaleService", msg)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msg, pg.Generation, r.clock, logger)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// After getting this far, we know the Tailscale Service is valid.
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, reasonKubeAPIServerProxyValid, pg.Generation, r.clock, logger)
|
||||||
|
|
||||||
|
// Service tags are limited to matching the ProxyGroup's tags until we have
|
||||||
|
// support for querying peer caps for a Service-bound request.
|
||||||
|
serviceTags := r.defaultTags
|
||||||
|
if len(pg.Spec.Tags) > 0 {
|
||||||
|
serviceTags = pg.Spec.Tags.Stringify()
|
||||||
|
}
|
||||||
|
|
||||||
|
tsSvc := &tailscale.VIPService{
|
||||||
|
Name: serviceName,
|
||||||
|
Tags: serviceTags,
|
||||||
|
Ports: []string{"tcp:443"},
|
||||||
|
Comment: managedTSServiceComment,
|
||||||
|
Annotations: updatedAnnotations,
|
||||||
|
}
|
||||||
|
if existingTSSvc != nil {
|
||||||
|
tsSvc.Addrs = existingTSSvc.Addrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Ensure the Tailscale Service exists and is up to date.
|
||||||
|
if existingTSSvc == nil ||
|
||||||
|
!slices.Equal(tsSvc.Tags, existingTSSvc.Tags) ||
|
||||||
|
!ownersAreSetAndEqual(tsSvc, existingTSSvc) ||
|
||||||
|
!slices.Equal(tsSvc.Ports, existingTSSvc.Ports) {
|
||||||
|
logger.Infof("Ensuring Tailscale Service exists and is up to date")
|
||||||
|
if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil {
|
||||||
|
return fmt.Errorf("error creating Tailscale Service: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Ensure that TLS Secret and RBAC exists.
|
||||||
|
tcd, err := tailnetCertDomain(ctx, r.lc)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error determining DNS name base: %w", err)
|
||||||
|
}
|
||||||
|
dnsName = serviceName.WithoutPrefix() + "." + tcd
|
||||||
|
if err = r.ensureCertResources(ctx, pg, dnsName); err != nil {
|
||||||
|
return fmt.Errorf("error ensuring cert resources: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Configure the Pods to advertise the Tailscale Service.
|
||||||
|
if err = r.maybeAdvertiseServices(ctx, pg, serviceName, logger); err != nil {
|
||||||
|
return fmt.Errorf("error updating advertised Tailscale Services: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. Clean up any stale Tailscale Services from previous resource versions.
|
||||||
|
if err = r.maybeDeleteStaleServices(ctx, pg, logger); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete stale Tailscale Services: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// maybeCleanup ensures that any resources created for this ProxyGroup, such as its Tailscale Service, are cleaned up when
// the ProxyGroup is being deleted. The cleanup is safe for a multi-cluster setup: the Tailscale Service is only
// deleted if it does not contain any other owner references. If it does, the cleanup only removes the owner reference
// corresponding to this ProxyGroup.
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) {
|
||||||
|
ix := slices.Index(pg.Finalizers, proxyPGFinalizerName)
|
||||||
|
if ix < 0 {
|
||||||
|
logger.Debugf("no finalizer, nothing to do")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
logger.Infof("Ensuring that Service %q is cleaned up", serviceName)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if err == nil {
|
||||||
|
err = r.deleteFinalizer(ctx, pg, logger)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger); err != nil {
|
||||||
|
return fmt.Errorf("error deleting Tailscale Service: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, serviceName); err != nil {
|
||||||
|
return fmt.Errorf("failed to clean up cert resources: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// maybeDeleteStaleServices deletes Services that have previously been created for
|
||||||
|
// this ProxyGroup but are no longer needed.
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error {
|
||||||
|
serviceName := serviceNameForAPIServerProxy(pg)
|
||||||
|
|
||||||
|
svcs, err := r.tsClient.ListVIPServices(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error listing Tailscale Services: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, svc := range svcs.VIPServices {
|
||||||
|
if svc.Name == serviceName {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
owners, err := parseOwnerAnnotation(&svc)
|
||||||
|
if err != nil {
|
||||||
|
logger.Warnf("error parsing owner annotation for Tailscale Service %s: %v", svc.Name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if owners == nil || len(owners.OwnerRefs) != 1 || owners.OwnerRefs[0].OperatorID != r.operatorID {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
owner := owners.OwnerRefs[0]
|
||||||
|
if owner.Resource == nil || owner.Resource.Kind != "ProxyGroup" || owner.Resource.UID != string(pg.UID) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Infof("Deleting Tailscale Service %s", svc.Name)
|
||||||
|
if err := r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) {
|
||||||
|
return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, svc.Name); err != nil {
|
||||||
|
return fmt.Errorf("failed to clean up cert resources: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) deleteFinalizer(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error {
|
||||||
|
pg.Finalizers = slices.DeleteFunc(pg.Finalizers, func(f string) bool {
|
||||||
|
return f == proxyPGFinalizerName
|
||||||
|
})
|
||||||
|
logger.Debugf("ensure %q finalizer is removed", proxyPGFinalizerName)
|
||||||
|
|
||||||
|
if err := r.Update(ctx, pg); err != nil {
|
||||||
|
return fmt.Errorf("failed to remove finalizer %q: %w", proxyPGFinalizerName, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string) error {
|
||||||
|
secret := certSecret(pg.Name, r.tsNamespace, domain, pg)
|
||||||
|
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) {
|
||||||
|
s.Labels = secret.Labels
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err)
|
||||||
|
}
|
||||||
|
role := certSecretRole(pg.Name, r.tsNamespace, domain)
|
||||||
|
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) {
|
||||||
|
r.Labels = role.Labels
|
||||||
|
r.Rules = role.Rules
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err)
|
||||||
|
}
|
||||||
|
rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain)
|
||||||
|
if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) {
|
||||||
|
rb.Labels = rolebinding.Labels
|
||||||
|
rb.Subjects = rolebinding.Subjects
|
||||||
|
rb.RoleRef = rolebinding.RoleRef
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *KubeAPIServerTSServiceReconciler) maybeAdvertiseServices(ctx context.Context, pg *tsapi.ProxyGroup, serviceName tailcfg.ServiceName, logger *zap.SugaredLogger) error {
|
||||||
|
// Get all config Secrets for this ProxyGroup
|
||||||
|
cfgSecrets := &corev1.SecretList{}
|
||||||
|
if err := r.List(ctx, cfgSecrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil {
|
||||||
|
return fmt.Errorf("failed to list config Secrets: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only advertise a Tailscale Service once the TLS certs required for
|
||||||
|
// serving it are available.
|
||||||
|
shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error checking TLS credentials provisioned for Tailscale Service %q: %w", serviceName, err)
|
||||||
|
}
|
||||||
|
var advertiseServices []string
|
||||||
|
if shouldBeAdvertised {
|
||||||
|
advertiseServices = []string{serviceName.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range cfgSecrets.Items {
|
||||||
|
if len(s.Data[kubetypes.KubeAPIServerConfigFile]) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the existing config.
|
||||||
|
cfg, err := conf.Load(s.Data[kubetypes.KubeAPIServerConfigFile])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error loading config from Secret %q: %w", s.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Parsed.APIServerProxy == nil {
|
||||||
|
return fmt.Errorf("config Secret %q does not contain APIServerProxy config", s.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
existingCfgSecret := s.DeepCopy()
|
||||||
|
|
||||||
|
var updated bool
|
||||||
|
if cfg.Parsed.APIServerProxy.ServiceName == nil || *cfg.Parsed.APIServerProxy.ServiceName != serviceName {
|
||||||
|
cfg.Parsed.APIServerProxy.ServiceName = &serviceName
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the services to advertise if required.
|
||||||
|
if !slices.Equal(cfg.Parsed.AdvertiseServices, advertiseServices) {
|
||||||
|
cfg.Parsed.AdvertiseServices = advertiseServices
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !updated {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the config Secret.
|
||||||
|
cfgB, err := json.Marshal(conf.VersionedConfig{
|
||||||
|
Version: "v1alpha1",
|
||||||
|
ConfigV1Alpha1: &cfg.Parsed,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB
|
||||||
|
if !apiequality.Semantic.DeepEqual(existingCfgSecret, s) {
|
||||||
|
logger.Debugf("Updating the Tailscale Services in ProxyGroup config Secret %s", s.Name)
|
||||||
|
if err := r.Update(ctx, &s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceNameForAPIServerProxy(pg *tsapi.ProxyGroup) tailcfg.ServiceName {
|
||||||
|
if pg.Spec.KubeAPIServer != nil && pg.Spec.KubeAPIServer.Hostname != "" {
|
||||||
|
return tailcfg.ServiceName("svc:" + pg.Spec.KubeAPIServer.Hostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tailcfg.ServiceName("svc:" + pg.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// exclusiveOwnerAnnotations returns the updated annotations required to ensure this
|
||||||
|
// instance of the operator is the exclusive owner. If the Tailscale Service is not
|
||||||
|
// nil but does not contain an owner reference, we return an error, as this likely means
|
||||||
|
// that the Service was created by something other than a Tailscale Kubernetes operator.
|
||||||
|
// We also error if it is already owned by another operator instance, as we do not
|
||||||
|
// want to load balance a kube-apiserver ProxyGroup across multiple clusters.
|
||||||
|
func exclusiveOwnerAnnotations(pg *tsapi.ProxyGroup, operatorID string, svc *tailscale.VIPService) (map[string]string, error) {
|
||||||
|
ref := OwnerRef{
|
||||||
|
OperatorID: operatorID,
|
||||||
|
Resource: &Resource{
|
||||||
|
Kind: "ProxyGroup",
|
||||||
|
Name: pg.Name,
|
||||||
|
UID: string(pg.UID),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if svc == nil {
|
||||||
|
c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}}
|
||||||
|
json, err := json.Marshal(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service's owner annotation contents: %w, please report this", err)
|
||||||
|
}
|
||||||
|
return map[string]string{
|
||||||
|
ownerAnnotation: string(json),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
o, err := parseOwnerAnnotation(svc)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if o == nil || len(o.OwnerRefs) == 0 {
|
||||||
|
return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name)
|
||||||
|
}
|
||||||
|
if len(o.OwnerRefs) > 1 || o.OwnerRefs[0].OperatorID != operatorID {
|
||||||
|
return nil, fmt.Errorf("Tailscale Service %s is already owned by other operator(s) and cannot be shared across multiple clusters; configure a difference Service name to continue", svc.Name)
|
||||||
|
}
|
||||||
|
if o.OwnerRefs[0].Resource == nil {
|
||||||
|
return nil, fmt.Errorf("Tailscale Service %s exists, but does not reference an owning resource; not proceeding as this is likely a Service already owned by an Ingress", svc.Name)
|
||||||
|
}
|
||||||
|
if o.OwnerRefs[0].Resource.Kind != "ProxyGroup" || o.OwnerRefs[0].Resource.UID != string(pg.UID) {
|
||||||
|
return nil, fmt.Errorf("Tailscale Service %s is already owned by another resource: %#v; configure a difference Service name to continue", svc.Name, o.OwnerRefs[0].Resource)
|
||||||
|
}
|
||||||
|
if o.OwnerRefs[0].Resource.Name != pg.Name {
|
||||||
|
// ProxyGroup name can be updated in place.
|
||||||
|
o.OwnerRefs[0].Resource.Name = pg.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
oBytes, err := json.Marshal(o)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newAnnots := make(map[string]string, len(svc.Annotations)+1)
|
||||||
|
maps.Copy(newAnnots, svc.Annotations)
|
||||||
|
newAnnots[ownerAnnotation] = string(oBytes)
|
||||||
|
|
||||||
|
return newAnnots, nil
|
||||||
|
}
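
// For reference, the owner annotation value written above has this JSON shape
// (field values here are illustrative; see the tests for concrete examples):
//
//	{"ownerRefs":[{"operatorID":"<operator-stable-id>","resource":{"kind":"ProxyGroup","name":"<proxygroup-name>","uid":"<proxygroup-uid>"}}]}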
@ -0,0 +1,384 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||||
|
"tailscale.com/internal/client/tailscale"
|
||||||
|
"tailscale.com/ipn/ipnstate"
|
||||||
|
tsoperator "tailscale.com/k8s-operator"
|
||||||
|
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||||
|
"tailscale.com/kube/k8s-proxy/conf"
|
||||||
|
"tailscale.com/kube/kubetypes"
|
||||||
|
"tailscale.com/tailcfg"
|
||||||
|
"tailscale.com/tstest"
|
||||||
|
"tailscale.com/types/opt"
|
||||||
|
"tailscale.com/types/ptr"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAPIServerProxyReconciler(t *testing.T) {
|
||||||
|
const (
|
||||||
|
pgName = "test-pg"
|
||||||
|
pgUID = "test-pg-uid"
|
||||||
|
ns = "operator-ns"
|
||||||
|
defaultDomain = "test-pg.ts.net"
|
||||||
|
)
|
||||||
|
pg := &tsapi.ProxyGroup{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: pgName,
|
||||||
|
Generation: 1,
|
||||||
|
UID: pgUID,
|
||||||
|
},
|
||||||
|
Spec: tsapi.ProxyGroupSpec{
|
||||||
|
Type: tsapi.ProxyGroupTypeKubernetesAPIServer,
|
||||||
|
},
|
||||||
|
Status: tsapi.ProxyGroupStatus{
|
||||||
|
Conditions: []metav1.Condition{
|
||||||
|
{
|
||||||
|
Type: string(tsapi.ProxyGroupAvailable),
|
||||||
|
Status: metav1.ConditionTrue,
|
||||||
|
ObservedGeneration: 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
initialCfg := &conf.VersionedConfig{
|
||||||
|
Version: "v1alpha1",
|
||||||
|
ConfigV1Alpha1: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("test-key"),
|
||||||
|
APIServerProxy: &conf.APIServerProxyConfig{
|
||||||
|
Enabled: opt.NewBool(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
expectedCfg := *initialCfg
|
||||||
|
initialCfgB, err := json.Marshal(initialCfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshaling initial config: %v", err)
|
||||||
|
}
|
||||||
|
pgCfgSecret := &corev1.Secret{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: pgConfigSecretName(pgName, 0),
|
||||||
|
Namespace: ns,
|
||||||
|
Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig),
|
||||||
|
},
|
||||||
|
Data: map[string][]byte{
|
||||||
|
// Existing config should be preserved.
|
||||||
|
kubetypes.KubeAPIServerConfigFile: initialCfgB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
fc := fake.NewClientBuilder().
|
||||||
|
WithScheme(tsapi.GlobalScheme).
|
||||||
|
WithObjects(pg, pgCfgSecret).
|
||||||
|
WithStatusSubresource(pg).
|
||||||
|
Build()
|
||||||
|
expectCfg := func(c *conf.VersionedConfig) {
|
||||||
|
t.Helper()
|
||||||
|
cBytes, err := json.Marshal(c)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshaling expected config: %v", err)
|
||||||
|
}
|
||||||
|
pgCfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cBytes
|
||||||
|
expectEqual(t, fc, pgCfgSecret)
|
||||||
|
}
|
||||||
|
|
||||||
|
ft := &fakeTSClient{}
|
||||||
|
ingressTSSvc := &tailscale.VIPService{
|
||||||
|
Name: "svc:some-ingress-hostname",
|
||||||
|
Comment: managedTSServiceComment,
|
||||||
|
Annotations: map[string]string{
|
||||||
|
// No resource field.
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`,
|
||||||
|
},
|
||||||
|
Ports: []string{"tcp:443"},
|
||||||
|
Tags: []string{"tag:k8s"},
|
||||||
|
Addrs: []string{"5.6.7.8"},
|
||||||
|
}
|
||||||
|
ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc)
|
||||||
|
|
||||||
|
lc := &fakeLocalClient{
|
||||||
|
status: &ipnstate.Status{
|
||||||
|
CurrentTailnet: &ipnstate.TailnetStatus{
|
||||||
|
MagicDNSSuffix: "ts.net",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &KubeAPIServerTSServiceReconciler{
|
||||||
|
Client: fc,
|
||||||
|
tsClient: ft,
|
||||||
|
defaultTags: []string{"tag:k8s"},
|
||||||
|
tsNamespace: ns,
|
||||||
|
logger: zap.Must(zap.NewDevelopment()).Sugar(),
|
||||||
|
recorder: record.NewFakeRecorder(10),
|
||||||
|
lc: lc,
|
||||||
|
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||||
|
operatorID: "self-id",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a Tailscale Service that will conflict with the initial config.
|
||||||
|
if err := ft.CreateOrUpdateVIPService(t.Context(), &tailscale.VIPService{
|
||||||
|
Name: "svc:" + pgName,
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatalf("creating initial Tailscale Service: %v", err)
|
||||||
|
}
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
pg.ObjectMeta.Finalizers = []string{proxyPGFinalizerName}
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, "", 1, r.clock, r.logger)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger)
|
||||||
|
expectEqual(t, fc, pg, omitPGStatusConditionMessages)
|
||||||
|
expectMissing[corev1.Secret](t, fc, ns, defaultDomain)
|
||||||
|
expectMissing[rbacv1.Role](t, fc, ns, defaultDomain)
|
||||||
|
expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain)
|
||||||
|
expectEqual(t, fc, pgCfgSecret) // Unchanged.
|
||||||
|
|
||||||
|
// Delete Tailscale Service; should see Service created and valid condition updated to true.
|
||||||
|
if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil {
|
||||||
|
t.Fatalf("deleting initial Tailscale Service: %v", err)
|
||||||
|
}
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
|
||||||
|
tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("getting Tailscale Service: %v", err)
|
||||||
|
}
|
||||||
|
if tsSvc == nil {
|
||||||
|
t.Fatalf("expected Tailscale Service to be created, but got nil")
|
||||||
|
}
|
||||||
|
expectedTSSvc := &tailscale.VIPService{
|
||||||
|
Name: "svc:" + pgName,
|
||||||
|
Comment: managedTSServiceComment,
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"test-pg-uid"}}]}`,
|
||||||
|
},
|
||||||
|
Ports: []string{"tcp:443"},
|
||||||
|
Tags: []string{"tag:k8s"},
|
||||||
|
Addrs: []string{"5.6.7.8"},
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(tsSvc, expectedTSSvc) {
|
||||||
|
t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc)
|
||||||
|
}
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.logger)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger)
|
||||||
|
expectEqual(t, fc, pg, omitPGStatusConditionMessages)
|
||||||
|
|
||||||
|
expectedCfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:" + pgName))
|
||||||
|
expectCfg(&expectedCfg)
|
||||||
|
|
||||||
|
expectEqual(t, fc, certSecret(pgName, ns, defaultDomain, pg))
|
||||||
|
expectEqual(t, fc, certSecretRole(pgName, ns, defaultDomain))
|
||||||
|
expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain))
|
||||||
|
|
||||||
|
// Simulate certs being issued; should observe AdvertiseServices config change.
|
||||||
|
if err := populateTLSSecret(t.Context(), fc, pgName, defaultDomain); err != nil {
|
||||||
|
t.Fatalf("populating TLS Secret: %v", err)
|
||||||
|
}
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
|
||||||
|
expectedCfg.AdvertiseServices = []string{"svc:" + pgName}
|
||||||
|
expectCfg(&expectedCfg)
|
||||||
|
|
||||||
|
expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Unchanged status.
|
||||||
|
|
||||||
|
// Simulate Pod prefs updated with advertised services; should see Configured condition updated to true.
|
||||||
|
mustCreate(t, fc, &corev1.Secret{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "test-pg-0",
|
||||||
|
Namespace: ns,
|
||||||
|
Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState),
|
||||||
|
},
|
||||||
|
Data: map[string][]byte{
|
||||||
|
"_current-profile": []byte("profile-foo"),
|
||||||
|
"profile-foo": []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger)
|
||||||
|
pg.Status.URL = "https://" + defaultDomain
|
||||||
|
expectEqual(t, fc, pg, omitPGStatusConditionMessages)
|
||||||
|
|
||||||
|
// Rename the Tailscale Service - old one + cert resources should be cleaned up.
|
||||||
|
updatedServiceName := tailcfg.ServiceName("svc:test-pg-renamed")
|
||||||
|
updatedDomain := "test-pg-renamed.ts.net"
|
||||||
|
pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{
|
||||||
|
Hostname: updatedServiceName.WithoutPrefix(),
|
||||||
|
}
|
||||||
|
mustUpdate(t, fc, "", pgName, func(p *tsapi.ProxyGroup) {
|
||||||
|
p.Spec.KubeAPIServer = pg.Spec.KubeAPIServer
|
||||||
|
})
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
_, err = ft.GetVIPService(t.Context(), "svc:"+pgName)
|
||||||
|
if !isErrorTailscaleServiceNotFound(err) {
|
||||||
|
t.Fatalf("Expected 404, got: %v", err)
|
||||||
|
}
|
||||||
|
tsSvc, err = ft.GetVIPService(t.Context(), updatedServiceName)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected renamed svc, got error: %v", err)
|
||||||
|
}
|
||||||
|
expectedTSSvc.Name = updatedServiceName
|
||||||
|
if !reflect.DeepEqual(tsSvc, expectedTSSvc) {
|
||||||
|
t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc)
|
||||||
|
}
|
||||||
|
// Check cfg and status reset until TLS certs are available again.
|
||||||
|
expectedCfg.APIServerProxy.ServiceName = ptr.To(updatedServiceName)
|
||||||
|
expectedCfg.AdvertiseServices = nil
|
||||||
|
expectCfg(&expectedCfg)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger)
|
||||||
|
pg.Status.URL = ""
|
||||||
|
expectEqual(t, fc, pg, omitPGStatusConditionMessages)
|
||||||
|
|
||||||
|
expectEqual(t, fc, certSecret(pgName, ns, updatedDomain, pg))
|
||||||
|
expectEqual(t, fc, certSecretRole(pgName, ns, updatedDomain))
|
||||||
|
expectEqual(t, fc, certSecretRoleBinding(pg, ns, updatedDomain))
|
||||||
|
expectMissing[corev1.Secret](t, fc, ns, defaultDomain)
|
||||||
|
expectMissing[rbacv1.Role](t, fc, ns, defaultDomain)
|
||||||
|
expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain)
|
||||||
|
|
||||||
|
// Check we get the new hostname in the status once ready.
|
||||||
|
if err := populateTLSSecret(t.Context(), fc, pgName, updatedDomain); err != nil {
|
||||||
|
t.Fatalf("populating TLS Secret: %v", err)
|
||||||
|
}
|
||||||
|
mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) {
|
||||||
|
s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`)
|
||||||
|
})
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
expectedCfg.AdvertiseServices = []string{updatedServiceName.String()}
|
||||||
|
expectCfg(&expectedCfg)
|
||||||
|
tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger)
|
||||||
|
pg.Status.URL = "https://" + updatedDomain
|
||||||
|
|
||||||
|
// Delete the ProxyGroup and verify Tailscale Service and cert resources are cleaned up.
|
||||||
|
if err := fc.Delete(t.Context(), pg); err != nil {
|
||||||
|
t.Fatalf("deleting ProxyGroup: %v", err)
|
||||||
|
}
|
||||||
|
expectReconciled(t, r, "", pgName)
|
||||||
|
expectMissing[corev1.Secret](t, fc, ns, updatedDomain)
|
||||||
|
expectMissing[rbacv1.Role](t, fc, ns, updatedDomain)
|
||||||
|
expectMissing[rbacv1.RoleBinding](t, fc, ns, updatedDomain)
|
||||||
|
_, err = ft.GetVIPService(t.Context(), updatedServiceName)
|
||||||
|
if !isErrorTailscaleServiceNotFound(err) {
|
||||||
|
t.Fatalf("Expected 404, got: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ingress Tailscale Service should not be affected.
|
||||||
|
svc, err := ft.GetVIPService(t.Context(), ingressTSSvc.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("getting ingress Tailscale Service: %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(svc, ingressTSSvc) {
|
||||||
|
t.Fatalf("expected ingress Tailscale Service to be unmodified %+v, got %+v", ingressTSSvc, svc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExclusiveOwnerAnnotations(t *testing.T) {
|
||||||
|
pg := &tsapi.ProxyGroup{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "pg1",
|
||||||
|
UID: "pg1-uid",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
const (
|
||||||
|
selfOperatorID = "self-id"
|
||||||
|
pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}`
|
||||||
|
)
|
||||||
|
|
||||||
|
for name, tc := range map[string]struct {
|
||||||
|
svc *tailscale.VIPService
|
||||||
|
wantErr string
|
||||||
|
}{
|
||||||
|
"no_svc": {
|
||||||
|
svc: nil,
|
||||||
|
},
|
||||||
|
"empty_svc": {
|
||||||
|
svc: &tailscale.VIPService{},
|
||||||
|
wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator",
|
||||||
|
},
|
||||||
|
"already_owner": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: pg1Owner,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"already_owner_name_updated": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"old-pg1-name","uid":"pg1-uid"}}]}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"preserves_existing_annotations": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
"existing": "annotation",
|
||||||
|
ownerAnnotation: pg1Owner,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"owned_by_another_operator": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantErr: "already owned by other operator(s)",
|
||||||
|
},
|
||||||
|
"owned_by_an_ingress": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, // Ingress doesn't set Resource field (yet).
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantErr: "does not reference an owning resource",
|
||||||
|
},
|
||||||
|
"owned_by_another_pg": {
|
||||||
|
svc: &tailscale.VIPService{
|
||||||
|
Annotations: map[string]string{
|
||||||
|
ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg2","uid":"pg2-uid"}}]}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantErr: "already owned by another resource",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
got, err := exclusiveOwnerAnnotations(pg, "self-id", tc.svc)
|
||||||
|
if tc.wantErr != "" {
|
||||||
|
if !strings.Contains(err.Error(), tc.wantErr) {
|
||||||
|
t.Errorf("exclusiveOwnerAnnotations() error = %v, wantErr %v", err, tc.wantErr)
|
||||||
|
}
|
||||||
|
} else if diff := cmp.Diff(pg1Owner, got[ownerAnnotation]); diff != "" {
|
||||||
|
t.Errorf("exclusiveOwnerAnnotations() mismatch (-want +got):\n%s", diff)
|
||||||
|
}
|
||||||
|
if tc.svc == nil {
|
||||||
|
return // Don't check annotations being preserved.
|
||||||
|
}
|
||||||
|
for k, v := range tc.svc.Annotations {
|
||||||
|
if k == ownerAnnotation {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if got[k] != v {
|
||||||
|
t.Errorf("exclusiveOwnerAnnotations() did not preserve annotation %q: got %q, want %q", k, got[k], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func omitPGStatusConditionMessages(p *tsapi.ProxyGroup) {
|
||||||
|
for i := range p.Status.Conditions {
|
||||||
|
// Don't bother validating the message.
|
||||||
|
p.Status.Conditions[i].Message = ""
|
||||||
|
}
|
||||||
|
}
@ -0,0 +1,264 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
//go:build !plan9
|
||||||
|
|
||||||
|
// Package config provides watchers for the various supported ways to load a
|
||||||
|
// config file for k8s-proxy; currently a file or a Kubernetes Secret.
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fsnotify/fsnotify"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
|
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
"tailscale.com/kube/k8s-proxy/conf"
|
||||||
|
"tailscale.com/kube/kubetypes"
|
||||||
|
"tailscale.com/types/ptr"
|
||||||
|
"tailscale.com/util/testenv"
|
||||||
|
)
|
||||||
|
|
||||||
|
type configLoader struct {
|
||||||
|
logger *zap.SugaredLogger
|
||||||
|
client clientcorev1.CoreV1Interface
|
||||||
|
|
||||||
|
cfgChan chan<- *conf.Config
|
||||||
|
previous []byte
|
||||||
|
|
||||||
|
once sync.Once // For use in tests. To close cfgIgnored.
|
||||||
|
cfgIgnored chan struct{} // For use in tests.
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interface, cfgChan chan<- *conf.Config) *configLoader {
|
||||||
|
return &configLoader{
|
||||||
|
logger: logger,
|
||||||
|
client: client,
|
||||||
|
cfgChan: cfgChan,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *configLoader) WatchConfig(ctx context.Context, path string) error {
|
||||||
|
secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
|
||||||
|
if isKubeSecret {
|
||||||
|
secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
|
||||||
|
}
|
||||||
|
if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
|
||||||
|
return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
|
||||||
|
return fmt.Errorf("error watching config file %q: %w", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
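
// Illustrative usage of the loader (a sketch; the channel, logger, client, and
// Secret reference below are assumptions, not call sites added in this change):
//
//	cfgChan := make(chan *conf.Config)
//	loader := NewConfigLoader(logger, clientset.CoreV1(), cfgChan)
//	// Watch a config file mounted on disk:
//	//	err := loader.WatchConfig(ctx, "/etc/tailscale/config.json")
//	// Or watch a Kubernetes Secret, referenced as "kube:<namespace>/<name>":
//	//	err := loader.WatchConfig(ctx, "kube:tailscale/k8s-proxy-config")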
|
||||||
|
|
||||||
|
func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
|
||||||
|
if bytes.Equal(raw, l.previous) {
|
||||||
|
if l.cfgIgnored != nil && testenv.InTest() {
|
||||||
|
l.once.Do(func() {
|
||||||
|
close(l.cfgIgnored)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := conf.Load(raw)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error loading config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case l.cfgChan <- &cfg:
|
||||||
|
}
|
||||||
|
|
||||||
|
l.previous = raw
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
|
||||||
|
var (
|
||||||
|
tickChan <-chan time.Time
|
||||||
|
eventChan <-chan fsnotify.Event
|
||||||
|
errChan <-chan error
|
||||||
|
)
|
||||||
|
|
||||||
|
if w, err := fsnotify.NewWatcher(); err != nil {
|
||||||
|
// Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
|
||||||
|
// See https://github.com/tailscale/tailscale/issues/15081
|
||||||
|
l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
|
||||||
|
ticker := time.NewTicker(5 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
tickChan = ticker.C
|
||||||
|
} else {
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
file := filepath.Base(path)
|
||||||
|
l.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
|
||||||
|
defer w.Close()
|
||||||
|
if err := w.Add(dir); err != nil {
|
||||||
|
return fmt.Errorf("failed to add fsnotify watch: %w", err)
|
||||||
|
}
|
||||||
|
eventChan = w.Events
|
||||||
|
errChan = w.Errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the initial config file, but after the watcher is already set up to
|
||||||
|
// avoid an unlucky race condition if the config file is edited in between.
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading config file %q: %w", path, err)
|
||||||
|
}
|
||||||
|
if err := l.reloadConfig(ctx, b); err != nil {
|
||||||
|
return fmt.Errorf("error loading initial config file %q: %w", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case err, ok := <-errChan:
|
||||||
|
if !ok {
|
||||||
|
// Watcher was closed.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("watcher error: %w", err)
|
||||||
|
case <-tickChan:
|
||||||
|
case ev, ok := <-eventChan:
|
||||||
|
if !ok {
|
||||||
|
// Watcher was closed.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if ev.Name != path || ev.Op&fsnotify.Write == 0 {
|
||||||
|
// Ignore irrelevant events.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading config file: %w", err)
|
||||||
|
}
|
||||||
|
// Writers such as os.WriteFile may truncate the file before writing
|
||||||
|
// new contents, so it's possible to read an empty file if we read before
|
||||||
|
// the write has completed.
|
||||||
|
if len(b) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := l.reloadConfig(ctx, b); err != nil {
|
||||||
|
return fmt.Errorf("error reloading config file %q: %v", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
|
||||||
|
secrets := l.client.Secrets(secretNamespace)
|
||||||
|
w, err := secrets.Watch(ctx, metav1.ListOptions{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
Kind: "Secret",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
// Re-watch regularly to avoid relying on long-lived connections.
|
||||||
|
// See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380
|
||||||
|
TimeoutSeconds: ptr.To(int64(600)),
|
||||||
|
FieldSelector: fmt.Sprintf("metadata.name=%s", secretName),
|
||||||
|
Watch: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to watch config Secret %q: %w", secretName, err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
// May not be the original watcher by the time we exit.
|
||||||
|
if w != nil {
|
||||||
|
w.Stop()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Get the initial config Secret now we've got the watcher set up.
|
||||||
|
secret, err := secrets.Get(ctx, secretName, metav1.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get config Secret %q: %w", secretName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.configFromSecret(ctx, secret); err != nil {
|
||||||
|
return fmt.Errorf("error loading initial config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.logger.Infof("Watching config Secret %q for changes", secretName)
|
||||||
|
for {
|
||||||
|
var secret *corev1.Secret
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case ev, ok := <-w.ResultChan():
|
||||||
|
if !ok {
|
||||||
|
w.Stop()
|
||||||
|
w, err = secrets.Watch(ctx, metav1.ListOptions{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
Kind: "Secret",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
TimeoutSeconds: ptr.To(int64(600)),
|
||||||
|
FieldSelector: fmt.Sprintf("metadata.name=%s", secretName),
|
||||||
|
Watch: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to re-watch config Secret %q: %w", secretName, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ev.Type {
|
||||||
|
case watch.Added, watch.Modified:
|
||||||
|
// New config available to load.
|
||||||
|
var ok bool
|
||||||
|
secret, ok = ev.Object.(*corev1.Secret)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected object type %T in watch event for config Secret %q", ev.Object, secretName)
|
||||||
|
}
|
||||||
|
if secret == nil || secret.Data == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := l.configFromSecret(ctx, secret); err != nil {
|
||||||
|
return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err)
|
||||||
|
}
|
||||||
|
case watch.Error:
|
||||||
|
return fmt.Errorf("error watching config Secret %q: %v", secretName, ev.Object)
|
||||||
|
default:
|
||||||
|
// Ignore, no action required.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
|
||||||
|
b := s.Data[kubetypes.KubeAPIServerConfigFile]
|
||||||
|
if len(b) == 0 {
|
||||||
|
return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.reloadConfig(ctx, b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
@ -0,0 +1,245 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
ktesting "k8s.io/client-go/testing"
|
||||||
|
"tailscale.com/kube/k8s-proxy/conf"
|
||||||
|
"tailscale.com/kube/kubetypes"
|
||||||
|
"tailscale.com/types/ptr"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWatchConfig(t *testing.T) {
|
||||||
|
type phase struct {
|
||||||
|
config string
|
||||||
|
cancel bool
|
||||||
|
expectedConf *conf.ConfigV1Alpha1
|
||||||
|
expectedErr string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same set of behaviour tests for each config source.
|
||||||
|
for _, env := range []string{"file", "kube"} {
|
||||||
|
t.Run(env, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
initialConfig string
|
||||||
|
phases []phase
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no_config",
|
||||||
|
phases: []phase{{
|
||||||
|
expectedErr: "error loading initial config",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid_config",
|
||||||
|
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
phases: []phase{{
|
||||||
|
expectedConf: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("abc123"),
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "can_cancel",
|
||||||
|
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
phases: []phase{
|
||||||
|
{
|
||||||
|
expectedConf: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("abc123"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cancel: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "can_reload",
|
||||||
|
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
phases: []phase{
|
||||||
|
{
|
||||||
|
expectedConf: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("abc123"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: `{"version": "v1alpha1", "authKey": "def456"}`,
|
||||||
|
expectedConf: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("def456"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignores_events_with_no_changes",
|
||||||
|
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
phases: []phase{
|
||||||
|
{
|
||||||
|
expectedConf: &conf.ConfigV1Alpha1{
|
||||||
|
AuthKey: ptr.To("abc123"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
config: `{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
root := t.TempDir()
|
||||||
|
cl := fake.NewClientset()
|
||||||
|
|
||||||
|
var cfgPath string
|
||||||
|
var writeFile func(*testing.T, string)
|
||||||
|
if env == "file" {
|
||||||
|
cfgPath = filepath.Join(root, kubetypes.KubeAPIServerConfigFile)
|
||||||
|
writeFile = func(t *testing.T, content string) {
|
||||||
|
if err := os.WriteFile(cfgPath, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatalf("error writing config file %q: %v", cfgPath, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cfgPath = "kube:default/config-secret"
|
||||||
|
writeFile = func(t *testing.T, content string) {
|
||||||
|
s := secretFrom(content)
|
||||||
|
mustCreateOrUpdate(t, cl, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
configChan := make(chan *conf.Config)
|
||||||
|
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||||
|
l.cfgIgnored = make(chan struct{})
|
||||||
|
errs := make(chan error)
|
||||||
|
ctx, cancel := context.WithCancel(t.Context())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
writeFile(t, tc.initialConfig)
|
||||||
|
go func() {
|
||||||
|
errs <- l.WatchConfig(ctx, cfgPath)
|
||||||
|
}()
|
||||||
|
|
||||||
|
for i, p := range tc.phases {
|
||||||
|
if p.config != "" {
|
||||||
|
writeFile(t, p.config)
|
||||||
|
}
|
||||||
|
if p.cancel {
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case cfg := <-configChan:
|
||||||
|
if diff := cmp.Diff(*p.expectedConf, cfg.Parsed); diff != "" {
|
||||||
|
t.Errorf("unexpected config (-want +got):\n%s", diff)
|
||||||
|
}
|
||||||
|
case err := <-errs:
|
||||||
|
if p.cancel {
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error after cancel: %v", err)
|
||||||
|
}
|
||||||
|
} else if p.expectedErr == "" {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
} else if !strings.Contains(err.Error(), p.expectedErr) {
|
||||||
|
t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error())
|
||||||
|
}
|
||||||
|
case <-l.cfgIgnored:
|
||||||
|
if p.expectedConf != nil {
|
||||||
|
t.Fatalf("expected config to be reloaded, but got ignored signal")
|
||||||
|
}
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
t.Fatalf("timed out waiting for expected event in phase: %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWatchConfigSecret_Rewatches(t *testing.T) {
|
||||||
|
cl := fake.NewClientset()
|
||||||
|
var watchCount int
|
||||||
|
var watcher *watch.RaceFreeFakeWatcher
|
||||||
|
expected := []string{
|
||||||
|
`{"version": "v1alpha1", "authKey": "abc123"}`,
|
||||||
|
`{"version": "v1alpha1", "authKey": "def456"}`,
|
||||||
|
`{"version": "v1alpha1", "authKey": "ghi789"}`,
|
||||||
|
}
|
||||||
|
cl.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) {
|
||||||
|
watcher = watch.NewRaceFreeFake()
|
||||||
|
watcher.Add(secretFrom(expected[watchCount]))
|
||||||
|
if action.GetVerb() == "watch" && action.GetResource().Resource == "secrets" {
|
||||||
|
watchCount++
|
||||||
|
}
|
||||||
|
return true, watcher, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
configChan := make(chan *conf.Config)
|
||||||
|
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||||
|
|
||||||
|
mustCreateOrUpdate(t, cl, secretFrom(expected[0]))
|
||||||
|
|
||||||
|
errs := make(chan error)
|
||||||
|
go func() {
|
||||||
|
errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret")
|
||||||
|
}()
|
||||||
|
|
||||||
|
for i := range 2 {
|
||||||
|
select {
|
||||||
|
case cfg := <-configChan:
|
||||||
|
if exp := expected[i]; cfg.Parsed.AuthKey == nil || !strings.Contains(exp, *cfg.Parsed.AuthKey) {
|
||||||
|
t.Fatalf("expected config to have authKey %q, got: %v", exp, cfg.Parsed.AuthKey)
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
watcher.Stop()
|
||||||
|
}
|
||||||
|
case err := <-errs:
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
case <-l.cfgIgnored:
|
||||||
|
t.Fatalf("expected config to be reloaded, but got ignored signal")
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
t.Fatalf("timed out waiting for expected event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if watchCount != 2 {
|
||||||
|
t.Fatalf("expected 2 watch API calls, got %d", watchCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func secretFrom(content string) *corev1.Secret {
|
||||||
|
return &corev1.Secret{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "config-secret",
|
||||||
|
},
|
||||||
|
Data: map[string][]byte{
|
||||||
|
kubetypes.KubeAPIServerConfigFile: []byte(content),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustCreateOrUpdate(t *testing.T, cl *fake.Clientset, s *corev1.Secret) {
|
||||||
|
t.Helper()
|
||||||
|
if _, err := cl.CoreV1().Secrets("default").Create(t.Context(), s, metav1.CreateOptions{}); err != nil {
|
||||||
|
if _, updateErr := cl.CoreV1().Secrets("default").Update(t.Context(), s, metav1.UpdateOptions{}); updateErr != nil {
|
||||||
|
t.Fatalf("error writing config Secret %q: %v", s.Name, updateErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
@ -0,0 +1,35 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
package localclient
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"tailscale.com/ipn"
|
||||||
|
)
|
||||||
|
|
||||||
|
type FakeLocalClient struct {
|
||||||
|
FakeIPNBusWatcher
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeLocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) {
|
||||||
|
return &f.FakeIPNBusWatcher, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeLocalClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) {
|
||||||
|
return nil, nil, fmt.Errorf("CertPair not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
type FakeIPNBusWatcher struct {
|
||||||
|
NotifyChan chan ipn.Notify
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeIPNBusWatcher) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeIPNBusWatcher) Next() (ipn.Notify, error) {
|
||||||
|
return <-f.NotifyChan, nil
|
||||||
|
}
@ -0,0 +1,49 @@
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Package localclient provides an interface for all the local.Client methods
|
||||||
|
// kube needs to use, so that we can easily mock it in tests.
|
||||||
|
package localclient
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"tailscale.com/client/local"
|
||||||
|
"tailscale.com/ipn"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LocalClient is roughly a subset of the local.Client struct's methods, used
|
||||||
|
// for easier testing.
|
||||||
|
type LocalClient interface {
|
||||||
|
WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error)
|
||||||
|
CertIssuer
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPNBusWatcher is local.IPNBusWatcher's methods restated in an interface to
|
||||||
|
// allow for easier mocking in tests.
|
||||||
|
type IPNBusWatcher interface {
|
||||||
|
io.Closer
|
||||||
|
Next() (ipn.Notify, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type CertIssuer interface {
|
||||||
|
CertPair(context.Context, string) ([]byte, []byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a LocalClient that wraps the provided local.Client.
|
||||||
|
func New(lc *local.Client) LocalClient {
|
||||||
|
return &localClient{lc: lc}
|
||||||
|
}
|
||||||
|
|
||||||
|
type localClient struct {
|
||||||
|
lc *local.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) {
|
||||||
|
return l.lc.WatchIPNBus(ctx, mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) {
|
||||||
|
return l.lc.CertPair(ctx, domain)
|
||||||
|
}
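
// Illustrative usage from a caller's perspective (a sketch; the local.Client
// value and the domain are assumptions):
//
//	lc := localclient.New(&local.Client{})
//	certPEM, keyPEM, err := lc.CertPair(ctx, "proxy.example.ts.net")
//
// Tests can substitute FakeLocalClient, which satisfies the same interface.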