all: rename variables with lowercase-l/uppercase-I

See http://go/no-ell

Signed-off-by: Alex Chan <alexc@tailscale.com>

Updates #cleanup

Change-Id: I8c976b51ce7a60f06315048b1920516129cc1d5d
parent 9048ea25db
commit c2e474e729
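
To illustrate the shape of the rename applied throughout (a made-up snippet in diff notation, not a hunk from this commit): a lone lowercase l is easily misread as the digit 1 or an uppercase I, so listeners become ln, loggers lg, locations loc, scanned lines line, and so on.

-	l, err := net.Listen("unix", sock)
-	defer l.Close()
+	ln, err := net.Listen("unix", sock)
+	defer ln.Close()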

@@ -203,12 +203,12 @@ func NewAppConnector(c Config) *AppConnector {
 		ac.wildcards = c.RouteInfo.Wildcards
 		ac.controlRoutes = c.RouteInfo.Control
 	}
-	ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) {
-		ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l)
-		metricStoreRoutes(c, l)
+	ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) {
+		ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln)
+		metricStoreRoutes(c, ln)
 	})
-	ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) {
-		ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l)
+	ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) {
+		ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln)
 	})
 	return ac
 }

@@ -510,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) {
 	slices.SortFunc(e.domains[domain], compareAddr)
 }

-func compareAddr(l, r netip.Addr) int {
-	return l.Compare(r)
+func compareAddr(a, b netip.Addr) int {
+	return a.Compare(b)
 }

 // routesWithout returns a without b where a and b
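
As an aside, a minimal runnable sketch of how a comparator of this shape plugs into slices.SortFunc; the package wrapper and sample addresses are invented for illustration:

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

// compareAddr orders addresses via netip.Addr.Compare, which returns a
// negative/zero/positive int, exactly the contract slices.SortFunc expects.
func compareAddr(a, b netip.Addr) int {
	return a.Compare(b)
}

func main() {
	addrs := []netip.Addr{
		netip.MustParseAddr("100.64.0.2"),
		netip.MustParseAddr("100.64.0.1"),
	}
	slices.SortFunc(addrs, compareAddr)
	fmt.Println(addrs) // [100.64.0.1 100.64.0.2]
}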

@@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
 	// The least troublesome thing to make that is not a file is a unix socket.
 	// Making a null device sadly requires root.
-	l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
+	ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer l.Close()
+	defer ln.Close()

 	err = WriteFile(path, []byte("hello"), 0644)
 	if err == nil {

@@ -24,7 +24,7 @@ type fakeBIRD struct {

 func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
 	sock := filepath.Join(t.TempDir(), "sock")
-	l, err := net.Listen("unix", sock)
+	ln, err := net.Listen("unix", sock)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
 		pe[p] = false
 	}
 	return &fakeBIRD{
-		Listener:         l,
+		Listener:         ln,
 		protocolsEnabled: pe,
 		sock:             sock,
 	}
@@ -123,12 +123,12 @@ type hangingListener struct {

 func newHangingListener(t *testing.T) *hangingListener {
 	sock := filepath.Join(t.TempDir(), "sock")
-	l, err := net.Listen("unix", sock)
+	ln, err := net.Listen("unix", sock)
 	if err != nil {
 		t.Fatal(err)
 	}
 	return &hangingListener{
-		Listener: l,
+		Listener: ln,
 		t:        t,
 		done:     make(chan struct{}),
 		sock:     sock,

@@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
   // match from a list of exit node `options` to `nodes`.
   const addBestMatchNode = (
     options: ExitNode[],
-    name: (l: ExitNodeLocation) => string
+    name: (loc: ExitNodeLocation) => string
   ) => {
     const bestNode = highestPriorityNode(options)
     if (!bestNode || !bestNode.Location) {
@@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
     locationNodesMap.forEach(
       // add one node per country
       (countryNodes) =>
-        addBestMatchNode(flattenMap(countryNodes), (l) => l.Country)
+        addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country)
     )
   } else {
     // Otherwise, show the best match on a city-level,
@@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) {
       countryNodes.forEach(
         // add one node per city
         (cityNodes) =>
-          addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`)
+          addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`)
       )
       // add the "Country: Best Match" node
       addBestMatchNode(
         flattenMap(countryNodes),
-        (l) => `${l.Country}: Best Match`
+        (loc) => `${loc.Country}: Best Match`
       )
     })
   }

@@ -418,13 +418,13 @@ func parseSynoinfo(path string) (string, error) {
 	// Extract the CPU in the middle (88f6282 in the above example).
 	s := bufio.NewScanner(f)
 	for s.Scan() {
-		l := s.Text()
-		if !strings.HasPrefix(l, "unique=") {
+		line := s.Text()
+		if !strings.HasPrefix(line, "unique=") {
 			continue
 		}
-		parts := strings.SplitN(l, "_", 3)
+		parts := strings.SplitN(line, "_", 3)
 		if len(parts) != 3 {
-			return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l)
+			return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line)
 		}
 		return parts[1], nil
 	}
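
To trace the parsing above on a concrete input (the CPU value 88f6282 comes from the comment in the hunk; the rest of the sample line is invented):

	line := `unique="synology_88f6282_413j"`
	parts := strings.SplitN(line, "_", 3)
	// parts == []string{`unique="synology`, `88f6282`, `413j"`}
	// len(parts) == 3, so parts[1] ("88f6282", the CPU) is returned.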

@@ -1287,8 +1287,8 @@ type localAPI struct {
 	notify *ipn.Notify
 }

-func (l *localAPI) Start() error {
-	path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake")
+func (lc *localAPI) Start() error {
+	path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake")
 	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
 		return err
 	}
@@ -1298,30 +1298,30 @@ func (l *localAPI) Start() error {
 		return err
 	}

-	l.srv = &http.Server{
-		Handler: l,
+	lc.srv = &http.Server{
+		Handler: lc,
 	}
-	l.Path = path
-	l.cond = sync.NewCond(&l.Mutex)
-	go l.srv.Serve(ln)
+	lc.Path = path
+	lc.cond = sync.NewCond(&lc.Mutex)
+	go lc.srv.Serve(ln)
 	return nil
 }

-func (l *localAPI) Close() {
-	l.srv.Close()
+func (lc *localAPI) Close() {
+	lc.srv.Close()
 }

-func (l *localAPI) Notify(n *ipn.Notify) {
+func (lc *localAPI) Notify(n *ipn.Notify) {
 	if n == nil {
 		return
 	}
-	l.Lock()
-	defer l.Unlock()
-	l.notify = n
-	l.cond.Broadcast()
+	lc.Lock()
+	defer lc.Unlock()
+	lc.notify = n
+	lc.cond.Broadcast()
 }

-func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	switch r.URL.Path {
 	case "/localapi/v0/serve-config":
 		if r.Method != "POST" {
@@ -1348,11 +1348,11 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			f.Flush()
 		}
 		enc := json.NewEncoder(w)
-		l.Lock()
-		defer l.Unlock()
+		lc.Lock()
+		defer lc.Unlock()
 		for {
-			if l.notify != nil {
-				if err := enc.Encode(l.notify); err != nil {
+			if lc.notify != nil {
+				if err := enc.Encode(lc.notify); err != nil {
 					// Usually broken pipe as the test client disconnects.
 					return
 				}
@@ -1360,7 +1360,7 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 				f.Flush()
 			}
 		}
-		l.cond.Wait()
+		lc.cond.Wait()
 	}
 }
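
The Notify/ServeHTTP pair above is a textbook sync.Cond handoff: the producer stores the latest notification and broadcasts, and waiters re-check the condition under the lock. In isolation the pattern looks like this (a generic sketch, not the test harness itself):

	var (
		mu     sync.Mutex
		cond   = sync.NewCond(&mu)
		latest *ipn.Notify // nil until the first notification arrives
	)

	// Producer side: publish a value and wake every waiter.
	publish := func(n *ipn.Notify) {
		mu.Lock()
		defer mu.Unlock()
		latest = n
		cond.Broadcast()
	}

	// Consumer side: block until a value is available.
	wait := func() *ipn.Notify {
		mu.Lock()
		defer mu.Unlock()
		for latest == nil {
			cond.Wait() // releases mu while sleeping, reacquires on wake
		}
		return latest
	}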

@@ -481,32 +481,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL
 	return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)}
 }

-func (l *rateLimitedListener) ExpVar() expvar.Var {
+func (ln *rateLimitedListener) ExpVar() expvar.Var {
 	m := new(metrics.Set)
-	m.Set("counter_accepted_connections", &l.numAccepts)
-	m.Set("counter_rejected_connections", &l.numRejects)
+	m.Set("counter_accepted_connections", &ln.numAccepts)
+	m.Set("counter_rejected_connections", &ln.numRejects)
 	return m
 }

 var errLimitedConn = errors.New("cannot accept connection; rate limited")

-func (l *rateLimitedListener) Accept() (net.Conn, error) {
+func (ln *rateLimitedListener) Accept() (net.Conn, error) {
 	// Even under a rate limited situation, we accept the connection immediately
 	// and close it, rather than being slow at accepting new connections.
 	// This provides two benefits: 1) it signals to the client that something
 	// is going on on the server, and 2) it prevents new connections from
 	// piling up and occupying resources in the OS kernel.
 	// The client will retry as needing (with backoffs in place).
-	cn, err := l.Listener.Accept()
+	cn, err := ln.Listener.Accept()
 	if err != nil {
 		return nil, err
 	}
-	if !l.lim.Allow() {
-		l.numRejects.Add(1)
+	if !ln.lim.Allow() {
+		ln.numRejects.Add(1)
 		cn.Close()
 		return nil, errLimitedConn
 	}
-	l.numAccepts.Add(1)
+	ln.numAccepts.Add(1)
 	return cn, nil
 }
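
A hedged sketch of wiring this listener into an accept loop, going only by the constructor and sentinel error shown above; the address, limits, and handleConn are illustrative assumptions:

	base, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// Allow roughly 10 new connections per second, with bursts of up to 20.
	ln := newRateLimitedListener(base, rate.Limit(10), 20)
	for {
		cn, err := ln.Accept()
		if errors.Is(err, errLimitedConn) {
			continue // the connection was accepted and closed to shed load
		}
		if err != nil {
			log.Fatal(err)
		}
		go handleConn(cn) // handleConn is a placeholder
	}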

@@ -36,21 +36,21 @@ type egressEpsReconciler struct {
 // It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired
 // configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready.
 func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
-	l := er.logger.With("Service", req.NamespacedName)
-	l.Debugf("starting reconcile")
-	defer l.Debugf("reconcile finished")
+	lg := er.logger.With("Service", req.NamespacedName)
+	lg.Debugf("starting reconcile")
+	defer lg.Debugf("reconcile finished")

 	eps := new(discoveryv1.EndpointSlice)
 	err = er.Get(ctx, req.NamespacedName, eps)
 	if apierrors.IsNotFound(err) {
-		l.Debugf("EndpointSlice not found")
+		lg.Debugf("EndpointSlice not found")
 		return reconcile.Result{}, nil
 	}
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err)
 	}
 	if !eps.DeletionTimestamp.IsZero() {
-		l.Debugf("EnpointSlice is being deleted")
+		lg.Debugf("EnpointSlice is being deleted")
 		return res, nil
 	}
@@ -64,7 +64,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	}
 	err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc)
 	if apierrors.IsNotFound(err) {
-		l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
+		lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
 		return res, nil
 	}
 	if err != nil {
@@ -77,7 +77,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	oldEps := eps.DeepCopy()
 	tailnetSvc := tailnetSvcName(svc)
-	l = l.With("tailnet-service-name", tailnetSvc)
+	lg = lg.With("tailnet-service-name", tailnetSvc)

 	// Retrieve the desired tailnet service configuration from the ConfigMap.
 	proxyGroupName := eps.Labels[labelProxyGroup]
@@ -88,12 +88,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	if cfgs == nil {
 		// TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later
 		// got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow.
-		l.Debugf("No egress config found, likely because ProxyGroup has not been created")
+		lg.Debugf("No egress config found, likely because ProxyGroup has not been created")
 		return res, nil
 	}
 	cfg, ok := (*cfgs)[tailnetSvc]
 	if !ok {
-		l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)
+		lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)
 		return res, nil
 	}
@@ -105,7 +105,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	}
 	newEndpoints := make([]discoveryv1.Endpoint, 0)
 	for _, pod := range podList.Items {
-		ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l)
+		ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg)
 		if err != nil {
 			return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err)
 		}
@@ -130,7 +130,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	// run a cleanup for deleted Pods etc.
 	eps.Endpoints = newEndpoints
 	if !reflect.DeepEqual(eps, oldEps) {
-		l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods")
+		lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods")
 		if err := er.Update(ctx, eps); err != nil {
 			return res, fmt.Errorf("error updating EndpointSlice: %w", err)
 		}
@@ -154,11 +154,11 @@ func podIPv4(pod *corev1.Pod) (string, error) {
 // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to
 // route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service
 // status written there to the desired service configuration.
-func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) {
-	l = l.With("proxy_pod", pod.Name)
-	l.Debugf("checking whether proxy is ready to route to egress service")
+func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) {
+	lg = lg.With("proxy_pod", pod.Name)
+	lg.Debugf("checking whether proxy is ready to route to egress service")
 	if !pod.DeletionTimestamp.IsZero() {
-		l.Debugf("proxy Pod is being deleted, ignore")
+		lg.Debugf("proxy Pod is being deleted, ignore")
 		return false, nil
 	}
 	podIP, err := podIPv4(&pod)
@@ -166,7 +166,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
 		return false, fmt.Errorf("error determining Pod IP address: %v", err)
 	}
 	if podIP == "" {
-		l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported")
+		lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported")
 		return false, nil
 	}
 	stateS := &corev1.Secret{
@@ -177,7 +177,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
 	}
 	err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS)
 	if apierrors.IsNotFound(err) {
-		l.Debugf("proxy does not have a state Secret, waiting...")
+		lg.Debugf("proxy does not have a state Secret, waiting...")
 		return false, nil
 	}
 	if err != nil {
@@ -185,7 +185,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
 	}
 	svcStatusBS := stateS.Data[egressservices.KeyEgressServices]
 	if len(svcStatusBS) == 0 {
-		l.Debugf("proxy's state Secret does not contain egress services status, waiting...")
+		lg.Debugf("proxy's state Secret does not contain egress services status, waiting...")
 		return false, nil
 	}
 	svcStatus := &egressservices.Status{}
@@ -193,22 +193,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
 		return false, fmt.Errorf("error unmarshalling egress service status: %w", err)
 	}
 	if !strings.EqualFold(podIP, svcStatus.PodIPv4) {
-		l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP)
+		lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP)
 		return false, nil
 	}
 	st, ok := (*svcStatus).Services[tailnetSvcName]
 	if !ok {
-		l.Infof("proxy's state Secret does not have egress service status, waiting...")
+		lg.Infof("proxy's state Secret does not have egress service status, waiting...")
 		return false, nil
 	}
 	if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) {
-		l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget)
+		lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget)
 		return false, nil
 	}
 	if !reflect.DeepEqual(cfg.Ports, st.Ports) {
-		l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports)
+		lg.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports)
 		return false, nil
 	}
-	l.Debugf("proxy is ready to route traffic to egress service")
+	lg.Debugf("proxy is ready to route traffic to egress service")
 	return true, nil
 }

@@ -71,9 +71,9 @@ type egressPodsReconciler struct {
 // If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the
 // readiness condition for backwards compatibility reasons.
 func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
-	l := er.logger.With("Pod", req.NamespacedName)
-	l.Debugf("starting reconcile")
-	defer l.Debugf("reconcile finished")
+	lg := er.logger.With("Pod", req.NamespacedName)
+	lg.Debugf("starting reconcile")
+	defer lg.Debugf("reconcile finished")

 	pod := new(corev1.Pod)
 	err = er.Get(ctx, req.NamespacedName, pod)
@@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err)
 	}
 	if !pod.DeletionTimestamp.IsZero() {
-		l.Debugf("Pod is being deleted, do nothing")
+		lg.Debugf("Pod is being deleted, do nothing")
 		return res, nil
 	}
 	if pod.Labels[LabelParentType] != proxyTypeProxyGroup {
-		l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
+		lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
 		return res, nil
 	}
@@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 	if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool {
 		return r.ConditionType == tsEgressReadinessGate
 	}) {
-		l.Debug("Pod does not have egress readiness gate set, skipping")
+		lg.Debug("Pod does not have egress readiness gate set, skipping")
 		return res, nil
 	}
@@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err)
 	}
 	if pg.Spec.Type != typeEgress {
-		l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
+		lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
 		return res, nil
 	}
 	// Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup.
@@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return c.Type == tsEgressReadinessGate
 	})
 	if idx != -1 {
-		l.Debugf("Pod is already ready, do nothing")
+		lg.Debugf("Pod is already ready, do nothing")
 		return res, nil
 	}
@@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 	for _, svc := range svcs.Items {
 		s := svc
 		go func() {
-			ll := l.With("service_name", s.Name)
+			ll := lg.With("service_name", s.Name)
 			d := retrieveClusterDomain(er.tsNamespace, ll)
 			healthCheckAddr := healthCheckForSvc(&s, d)
 			if healthCheckAddr == "" {
@@ -178,22 +178,22 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return res, fmt.Errorf("error verifying conectivity: %w", err)
 	}
 	if rm := routesMissing.Load(); rm {
-		l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
+		lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
 		return reconcile.Result{RequeueAfter: shortRequeue}, nil
 	}
-	if err := er.setPodReady(ctx, pod, l); err != nil {
+	if err := er.setPodReady(ctx, pod, lg); err != nil {
 		return res, fmt.Errorf("error setting Pod as ready: %w", err)
 	}
 	return res, nil
 }

-func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error {
+func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error {
 	if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool {
 		return c.Type == tsEgressReadinessGate
 	}) {
 		return nil
 	}
-	l.Infof("Pod is ready to route traffic to all egress targets")
+	lg.Infof("Pod is ready to route traffic to all egress targets")
 	pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
 		Type:   tsEgressReadinessGate,
 		Status: corev1.ConditionTrue,
@@ -216,11 +216,11 @@ const (
 )

 // lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check.
-func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) {
+func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) {
 	if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool {
 		return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true"
 	}) {
-		l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
+		lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
 		return cannotVerify, nil
 	}
 	wantsIP, err := podIPv4(pod)
@@ -248,7 +248,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c
 	defer resp.Body.Close()
 	gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
 	if gotIP == "" {
-		l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
+		lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
 		return cannotVerify, nil
 	}
 	if !strings.EqualFold(wantsIP, gotIP) {

@@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct {
 // route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress
 // service to determine how many replicas are currently able to route traffic.
 func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
-	l := esrr.logger.With("Service", req.NamespacedName)
-	l.Debugf("starting reconcile")
-	defer l.Debugf("reconcile finished")
+	lg := esrr.logger.With("Service", req.NamespacedName)
+	lg.Debugf("starting reconcile")
+	defer lg.Debugf("reconcile finished")

 	svc := new(corev1.Service)
 	if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
-		l.Debugf("Service not found")
+		lg.Debugf("Service not found")
 		return res, nil
 	} else if err != nil {
 		return res, fmt.Errorf("failed to get Service: %w", err)
@@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 	)
 	oldStatus := svc.Status.DeepCopy()
 	defer func() {
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg)
 		if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
 			err = errors.Join(err, esrr.Status().Update(ctx, svc))
 		}
@@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 		return res, err
 	}
 	if eps == nil {
-		l.Infof("EndpointSlice for Service does not yet exist, waiting...")
+		lg.Infof("EndpointSlice for Service does not yet exist, waiting...")
 		reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
 		st = metav1.ConditionFalse
 		return res, nil
@@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 	}
 	err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg)
 	if apierrors.IsNotFound(err) {
-		l.Infof("ProxyGroup for Service does not exist, waiting...")
+		lg.Infof("ProxyGroup for Service does not exist, waiting...")
 		reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
 		st = metav1.ConditionFalse
 		return res, nil
@@ -103,7 +103,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 		return res, err
 	}
 	if !tsoperator.ProxyGroupAvailable(pg) {
-		l.Infof("ProxyGroup for Service is not ready, waiting...")
+		lg.Infof("ProxyGroup for Service is not ready, waiting...")
 		reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
 		st = metav1.ConditionFalse
 		return res, nil
@@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 	replicas := pgReplicas(pg)
 	if replicas == 0 {
-		l.Infof("ProxyGroup replicas set to 0")
+		lg.Infof("ProxyGroup replicas set to 0")
 		reason, msg = reasonNoProxies, reasonNoProxies
 		st = metav1.ConditionFalse
 		return res, nil
@@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 			return res, err
 		}
 		if pod == nil {
-			l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
+			lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
 			reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
 			return res, nil
 		}
-		l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
+		lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
 		ready := false
 		for _, ep := range eps.Endpoints {
-			l.Debugf("looking at endpoint with addresses %v", ep.Addresses)
-			if endpointReadyForPod(&ep, pod, l) {
-				l.Debugf("endpoint is ready for Pod")
+			lg.Debugf("looking at endpoint with addresses %v", ep.Addresses)
+			if endpointReadyForPod(&ep, pod, lg) {
+				lg.Debugf("endpoint is ready for Pod")
 				ready = true
 				break
 			}
@@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 // endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic.
 // Endpoint must not be nil.
-func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool {
+func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool {
 	podIP, err := podIPv4(pod)
 	if err != nil {
-		l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
+		lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
 		return false
 	}
 	// Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this.

@@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) {
 		},
 	}
 	fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}}
-	l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc)
+	labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc)
 	eps := &discoveryv1.EndpointSlice{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "my-app",
 			Namespace: "operator-ns",
-			Labels:    l,
+			Labels:    labels,
 		},
 		AddressType: discoveryv1.AddressTypeIPv4,
 	}
@@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) {
 	})
 }

-func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) {
-	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l)
+func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) {
+	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg)
 }

-func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) {
+func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) {
 	msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas)
-	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l)
+	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg)
 }

-func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) {
+func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) {
 	reason := reasonPartiallyReady
 	if readyReplicas == replicas {
 		reason = reasonReady
 	}
 	msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas)
-	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l)
+	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg)
 }

-func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) {
-	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l)
+func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) {
+	tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg)
 }

 func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) {
@@ -153,14 +153,14 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1
 }

 func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod {
-	l := pgLabels(pg.Name, nil)
-	l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal)
+	labels := pgLabels(pg.Name, nil)
+	labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal)
 	ip := fmt.Sprintf("10.0.0.%d", ordinal)
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      fmt.Sprintf("%s-%d", pg.Name, ordinal),
 			Namespace: "operator-ns",
-			Labels:    l,
+			Labels:    labels,
 		},
 		Status: corev1.PodStatus{
 			PodIPs: []corev1.PodIP{{IP: ip}},

@@ -98,12 +98,12 @@ type egressSvcsReconciler struct {
 // - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the
 // portmappings.
 func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
-	l := esr.logger.With("Service", req.NamespacedName)
-	defer l.Info("reconcile finished")
+	lg := esr.logger.With("Service", req.NamespacedName)
+	defer lg.Info("reconcile finished")

 	svc := new(corev1.Service)
 	if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
-		l.Info("Service not found")
+		lg.Info("Service not found")
 		return res, nil
 	} else if err != nil {
 		return res, fmt.Errorf("failed to get Service: %w", err)
@@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 	// Name of the 'egress service', meaning the tailnet target.
 	tailnetSvc := tailnetSvcName(svc)
-	l = l.With("tailnet-service", tailnetSvc)
+	lg = lg.With("tailnet-service", tailnetSvc)

 	// Note that resources for egress Services are only cleaned up when the
 	// Service is actually deleted (and not if, for example, user decides to
@@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 	// assume that the egress ExternalName Services are always created for
 	// Tailscale operator specifically.
 	if !svc.DeletionTimestamp.IsZero() {
-		l.Info("Service is being deleted, ensuring resource cleanup")
-		return res, esr.maybeCleanup(ctx, svc, l)
+		lg.Info("Service is being deleted, ensuring resource cleanup")
+		return res, esr.maybeCleanup(ctx, svc, lg)
 	}

 	oldStatus := svc.Status.DeepCopy()
@@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 	}()

 	// Validate the user-created ExternalName Service and the associated ProxyGroup.
-	if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil {
+	if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil {
 		return res, fmt.Errorf("error validating cluster resources: %w", err)
 	} else if !ok {
 		return res, nil
@@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 		svc.Finalizers = append(svc.Finalizers, FinalizerName)
 		if err := esr.updateSvcSpec(ctx, svc); err != nil {
 			err := fmt.Errorf("failed to add finalizer: %w", err)
-			r := svcConfiguredReason(svc, false, l)
-			tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
+			r := svcConfiguredReason(svc, false, lg)
+			tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg)
 			return res, err
 		}
 		esr.mu.Lock()
@@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 		esr.mu.Unlock()
 	}

-	if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); err != nil {
+	if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil {
 		err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err)
-		r := svcConfiguredReason(svc, false, l)
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
+		r := svcConfiguredReason(svc, false, lg)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg)
 		return res, err
 	}

-	if err := esr.maybeProvision(ctx, svc, l); err != nil {
+	if err := esr.maybeProvision(ctx, svc, lg); err != nil {
 		if strings.Contains(err.Error(), optimisticLockErrorMsg) {
-			l.Infof("optimistic lock error, retrying: %s", err)
+			lg.Infof("optimistic lock error, retrying: %s", err)
 		} else {
 			return reconcile.Result{}, err
 		}
@@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
 	return res, nil
 }

-func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) {
-	r := svcConfiguredReason(svc, false, l)
+func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) {
+	r := svcConfiguredReason(svc, false, lg)
 	st := metav1.ConditionFalse
 	defer func() {
 		msg := r
 		if st != metav1.ConditionTrue && err != nil {
 			msg = err.Error()
 		}
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg)
 	}()

 	crl := egressSvcChildResourceLabels(svc)
@@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1
 	if clusterIPSvc == nil {
 		clusterIPSvc = esr.clusterIPSvcForEgress(crl)
 	}
-	upToDate := svcConfigurationUpToDate(svc, l)
+	upToDate := svcConfigurationUpToDate(svc, lg)
 	provisioned := true
 	if !upToDate {
-		if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil {
+		if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil {
 			return err
 		}
 	}
 	if !provisioned {
-		l.Infof("unable to provision cluster resources")
+		lg.Infof("unable to provision cluster resources")
 		return nil
 	}

 	// Update ExternalName Service to point at the ClusterIP Service.
-	clusterDomain := retrieveClusterDomain(esr.tsNamespace, l)
+	clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg)
 	clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain)
 	if svc.Spec.ExternalName != clusterIPSvcFQDN {
-		l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
+		lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
 		svc.Spec.ExternalName = clusterIPSvcFQDN
 		if err = esr.updateSvcSpec(ctx, svc); err != nil {
 			err = fmt.Errorf("error updating ExternalName Service: %w", err)
 			return err
 		}
 	}
-	r = svcConfiguredReason(svc, true, l)
+	r = svcConfiguredReason(svc, true, lg)
 	st = metav1.ConditionTrue
 	return nil
 }

-func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) {
-	l.Infof("updating configuration...")
+func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) {
+	lg.Infof("updating configuration...")
 	usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName)
 	if err != nil {
 		return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err)
@@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
 			}
 		}
 		if !found {
-			l.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port)
+			lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port)
 			clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1)
 		}
 	}
@@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
 			return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts)
 		}
 		p := unusedPort(usedPorts)
-		l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p)
+		lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p)
 		usedPorts.Insert(p)
 		clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{
 			Name: wantsPM.Name,
@@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
 		return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err)
 	}
 	if cm == nil {
-		l.Info("ConfigMap not yet created, waiting..")
+		lg.Info("ConfigMap not yet created, waiting..")
 		return nil, false, nil
 	}
 	tailnetSvc := tailnetSvcName(svc)
 	gotCfg := (*cfgs)[tailnetSvc]
-	wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l)
+	wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg)
 	if !reflect.DeepEqual(gotCfg, wantsCfg) {
-		l.Debugf("updating egress services ConfigMap %s", cm.Name)
+		lg.Debugf("updating egress services ConfigMap %s", cm.Name)
 		mak.Set(cfgs, tailnetSvc, wantsCfg)
 		bs, err := json.Marshal(cfgs)
 		if err != nil {
@@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
 			return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err)
 		}
 	}
-	l.Infof("egress service configuration has been updated")
+	lg.Infof("egress service configuration has been updated")
 	return clusterIPSvc, true, nil
 }
@@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S
 	return nil
 }

-func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error {
+func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error {
 	wantsProxyGroup := svc.Annotations[AnnotationProxyGroup]
 	cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
 	if cond == nil {
@@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex
 		return nil
 	}
 	esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup)
-	if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil {
+	if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil {
 		return fmt.Errorf("error deleting egress service config: %w", err)
 	}
 	return nil
@@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
 			Namespace: esr.tsNamespace,
 		},
 	}
-	l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm))
-	l.Debug("ensuring that egress service configuration is removed from proxy config")
+	lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm))
+	lggr.Debug("ensuring that egress service configuration is removed from proxy config")
 	if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) {
-		l.Debugf("ConfigMap not found")
+		lggr.Debugf("ConfigMap not found")
 		return nil
 	} else if err != nil {
 		return fmt.Errorf("error retrieving ConfigMap: %w", err)
 	}
 	bs := cm.BinaryData[egressservices.KeyEgressServices]
 	if len(bs) == 0 {
-		l.Debugf("ConfigMap does not contain egress service configs")
+		lggr.Debugf("ConfigMap does not contain egress service configs")
 		return nil
 	}
 	cfgs := &egressservices.Configs{}
@@ -491,12 +491,12 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
 	tailnetSvc := tailnetSvcName(svc)
 	_, ok := (*cfgs)[tailnetSvc]
 	if !ok {
-		l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted")
+		lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted")
 		return nil
 	}
-	l.Infof("before deleting config %+#v", *cfgs)
+	lggr.Infof("before deleting config %+#v", *cfgs)
 	delete(*cfgs, tailnetSvc)
-	l.Infof("after deleting config %+#v", *cfgs)
+	lggr.Infof("after deleting config %+#v", *cfgs)
 	bs, err := json.Marshal(cfgs)
 	if err != nil {
 		return fmt.Errorf("error marshalling egress services configs: %w", err)
@@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
 	return esr.Update(ctx, cm)
 }

-func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) {
+func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) {
 	proxyGroupName := svc.Annotations[AnnotationProxyGroup]
 	pg := &tsapi.ProxyGroup{
 		ObjectMeta: metav1.ObjectMeta{
@@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
 		},
 	}
 	if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) {
-		l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName)
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
+		lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg)
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 		return false, nil
 	} else if err != nil {
 		err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err)
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg)
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 		return false, err
 	}
 	if violations := validateEgressService(svc, pg); len(violations) > 0 {
 		msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", "))
 		esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg)
-		l.Info(msg)
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l)
+		lg.Info(msg)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg)
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 		return false, nil
 	}
 	if !tsoperator.ProxyGroupAvailable(pg) {
-		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
+		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg)
 		tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
 	}
-	l.Debugf("egress service is valid")
-	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l)
+	lg.Debugf("egress service is valid")
+	tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg)
 	return true, nil
 }

-func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config {
-	d := retrieveClusterDomain(ns, l)
+func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config {
+	d := retrieveClusterDomain(ns, lg)
 	tt := tailnetTargetFromSvc(externalNameSvc)
 	hep := healthCheckForSvc(clusterIPSvc, d)
 	cfg := egressservices.Config{
@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string {
// egressSvcEpsLabels returns labels to be added to an EndpointSlice created for an egress service. // egressSvcEpsLabels returns labels to be added to an EndpointSlice created for an egress service.
func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string {
l := egressSvcChildResourceLabels(extNSvc) lbels := egressSvcChildResourceLabels(extNSvc)
// Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the
// endpoints defined on this EndpointSlice. // endpoints defined on this EndpointSlice.
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
l[discoveryv1.LabelServiceName] = clusterIPSvc.Name lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name
// Kubernetes recommends setting this label. // Kubernetes recommends setting this label.
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management
l[discoveryv1.LabelManagedBy] = "tailscale.com" lbels[discoveryv1.LabelManagedBy] = "tailscale.com"
return l return lbels
} }
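
The two labels set above are the load-bearing part of the egress data path, so a minimal sketch of an EndpointSlice carrying them may help; the name, namespace, and Service below are invented, and only the label keys come from the code above.

    package egressexample

    import (
        discoveryv1 "k8s.io/api/discovery/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // exampleEndpointSlice builds a slice that kube-proxy will treat as the
    // endpoints of the hypothetical ClusterIP Service "egress-fwd" in "ts".
    func exampleEndpointSlice() *discoveryv1.EndpointSlice {
        return &discoveryv1.EndpointSlice{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "egress-fwd-abc12",
                Namespace: "ts",
                Labels: map[string]string{
                    // "kubernetes.io/service-name": ties the slice to the Service.
                    discoveryv1.LabelServiceName: "egress-fwd",
                    // "endpointslice.kubernetes.io/managed-by": records the manager.
                    discoveryv1.LabelManagedBy: "tailscale.com",
                },
            },
            AddressType: discoveryv1.AddressTypeIPv4,
        }
    }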
func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool {
cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
if cond == nil { if cond == nil {
return false return false
@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool {
if cond.Status != metav1.ConditionTrue { if cond.Status != metav1.ConditionTrue {
return false return false
} }
wantsReadyReason := svcConfiguredReason(svc, true, l) wantsReadyReason := svcConfiguredReason(svc, true, lg)
return strings.EqualFold(wantsReadyReason, cond.Reason) return strings.EqualFold(wantsReadyReason, cond.Reason)
} }
func cfgHash(c cfg, l *zap.SugaredLogger) string { func cfgHash(c cfg, lg *zap.SugaredLogger) string {
bs, err := json.Marshal(c) bs, err := json.Marshal(c)
if err != nil { if err != nil {
// Don't use l.Error as that messes up component logs with, in this case, an unnecessary stack trace. // Don't use lg.Error as that messes up component logs with, in this case, an unnecessary stack trace.
l.Infof("error marshalling Config: %v", err) lg.Infof("error marshalling Config: %v", err)
return "" return ""
} }
h := sha256.New() h := sha256.New()
if _, err := h.Write(bs); err != nil { if _, err := h.Write(bs); err != nil {
// Don't use l.Error as that messes up component logs with, in this case, an unnecessary stack trace. // Don't use lg.Error as that messes up component logs with, in this case, an unnecessary stack trace.
l.Infof("error producing Config hash: %v", err) lg.Infof("error producing Config hash: %v", err)
return "" return ""
} }
return fmt.Sprintf("%x", h.Sum(nil)) return fmt.Sprintf("%x", h.Sum(nil))
@ -736,7 +736,7 @@ type cfg struct {
ProxyGroup string `json:"proxyGroup"` ProxyGroup string `json:"proxyGroup"`
} }
func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string { func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string {
var r string var r string
if configured { if configured {
r = "ConfiguredFor:" r = "ConfiguredFor:"
@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLog
TailnetTarget: tt, TailnetTarget: tt,
ProxyGroup: svc.Annotations[AnnotationProxyGroup], ProxyGroup: svc.Annotations[AnnotationProxyGroup],
} }
r += fmt.Sprintf(":Config:%s", cfgHash(s, l)) r += fmt.Sprintf(":Config:%s", cfgHash(s, lg))
return r return r
} }
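
cfgHash's approach, reduced to a standalone program with illustrative values: encoding/json emits struct fields in declaration order and sorts map keys, so equal configs always marshal to the same bytes and therefore the same digest.

    package main

    import (
        "crypto/sha256"
        "encoding/json"
        "fmt"
    )

    type cfg struct {
        TailnetTarget string `json:"tailnetTarget"`
        ProxyGroup    string `json:"proxyGroup"`
    }

    func main() {
        bs, err := json.Marshal(cfg{TailnetTarget: "svc.example.ts.net", ProxyGroup: "pg"})
        if err != nil {
            panic(err)
        }
        // Same technique as cfgHash: hash the canonical JSON, print as hex.
        fmt.Printf("%x\n", sha256.Sum256(bs))
    }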

@ -249,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort {
return ports return ports
} }
func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) { func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) {
t.Helper() t.Helper()
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar()) wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar())
if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil {
t.Fatalf("Error retrieving ConfigMap: %v", err) t.Fatalf("Error retrieving ConfigMap: %v", err)
} }

@ -1282,8 +1282,8 @@ func TestServiceProxyClassAnnotation(t *testing.T) {
slist := &corev1.SecretList{} slist := &corev1.SecretList{}
fc.List(context.Background(), slist, client.InNamespace("operator-ns")) fc.List(context.Background(), slist, client.InNamespace("operator-ns"))
for _, i := range slist.Items { for _, i := range slist.Items {
l, _ := json.Marshal(i.Labels) labels, _ := json.Marshal(i.Labels)
t.Logf("found secret %q with labels %q ", i.Name, string(l)) t.Logf("found secret %q with labels %q ", i.Name, string(labels))
} }
_, shortName := findGenName(t, fc, "default", "test", "svc") _, shortName := findGenName(t, fc, "default", "test", "svc")

@ -524,16 +524,16 @@ func pgSecretLabels(pgName, secretType string) map[string]string {
} }
func pgLabels(pgName string, customLabels map[string]string) map[string]string { func pgLabels(pgName string, customLabels map[string]string) map[string]string {
l := make(map[string]string, len(customLabels)+3) labels := make(map[string]string, len(customLabels)+3)
for k, v := range customLabels { for k, v := range customLabels {
l[k] = v labels[k] = v
} }
l[kubetypes.LabelManaged] = "true" labels[kubetypes.LabelManaged] = "true"
l[LabelParentType] = "proxygroup" labels[LabelParentType] = "proxygroup"
l[LabelParentName] = pgName labels[LabelParentName] = pgName
return l return labels
} }
func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference {

@ -281,17 +281,17 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar {
} }
func labels(app, instance string, customLabels map[string]string) map[string]string { func labels(app, instance string, customLabels map[string]string) map[string]string {
l := make(map[string]string, len(customLabels)+3) labels := make(map[string]string, len(customLabels)+3)
for k, v := range customLabels { for k, v := range customLabels {
l[k] = v labels[k] = v
} }
// ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
l["app.kubernetes.io/name"] = app labels["app.kubernetes.io/name"] = app
l["app.kubernetes.io/instance"] = instance labels["app.kubernetes.io/instance"] = instance
l["app.kubernetes.io/managed-by"] = "tailscale-operator" labels["app.kubernetes.io/managed-by"] = "tailscale-operator"
return l return labels
} }
func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference {

@ -50,32 +50,32 @@ func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interf
} }
} }
func (l *configLoader) WatchConfig(ctx context.Context, path string) error { func (ld *configLoader) WatchConfig(ctx context.Context, path string) error {
secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
if isKubeSecret { if isKubeSecret {
secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
if !ok { if !ok {
return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path) return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
} }
if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
} }
return nil return nil
} }
if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config file %q: %w", path, err) return fmt.Errorf("error watching config file %q: %w", path, err)
} }
return nil return nil
} }
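
The path scheme WatchConfig accepts, sketched in isolation (both example paths are invented; the real code splits on types.Separator, which is the same '/'): a "kube:" prefix names a Secret as <namespace>/<name>, anything else is a file on disk.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        for _, path := range []string{"kube:tailscale/proxy-config", "/etc/tsconfig.hujson"} {
            if ref, ok := strings.CutPrefix(path, "kube:"); ok {
                ns, name, ok := strings.Cut(ref, "/")
                if !ok {
                    fmt.Printf("%q: want <namespace>/<name>\n", path)
                    continue
                }
                fmt.Printf("watch Secret %s in namespace %s\n", name, ns)
                continue
            }
            fmt.Printf("watch file %s\n", path)
        }
    }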
func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
if bytes.Equal(raw, l.previous) { if bytes.Equal(raw, ld.previous) {
if l.cfgIgnored != nil && testenv.InTest() { if ld.cfgIgnored != nil && testenv.InTest() {
l.once.Do(func() { ld.once.Do(func() {
close(l.cfgIgnored) close(ld.cfgIgnored)
}) })
} }
return nil return nil
@ -89,14 +89,14 @@ func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return ctx.Err()
case l.cfgChan <- &cfg: case ld.cfgChan <- &cfg:
} }
l.previous = raw ld.previous = raw
return nil return nil
} }
func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
var ( var (
tickChan <-chan time.Time tickChan <-chan time.Time
eventChan <-chan fsnotify.Event eventChan <-chan fsnotify.Event
@ -106,14 +106,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if w, err := fsnotify.NewWatcher(); err != nil { if w, err := fsnotify.NewWatcher(); err != nil {
// Creating a new fsnotify watcher would fail, for example, if inotify could not create a new file descriptor. // Creating a new fsnotify watcher would fail, for example, if inotify could not create a new file descriptor.
// See https://github.com/tailscale/tailscale/issues/15081 // See https://github.com/tailscale/tailscale/issues/15081
l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
ticker := time.NewTicker(5 * time.Second) ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop() defer ticker.Stop()
tickChan = ticker.C tickChan = ticker.C
} else { } else {
dir := filepath.Dir(path) dir := filepath.Dir(path)
file := filepath.Base(path) file := filepath.Base(path)
l.logger.Infof("Watching directory %q for changes to config file %q", dir, file) ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
defer w.Close() defer w.Close()
if err := w.Add(dir); err != nil { if err := w.Add(dir); err != nil {
return fmt.Errorf("failed to add fsnotify watch: %w", err) return fmt.Errorf("failed to add fsnotify watch: %w", err)
@ -128,7 +128,7 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if err != nil { if err != nil {
return fmt.Errorf("error reading config file %q: %w", path, err) return fmt.Errorf("error reading config file %q: %w", path, err)
} }
if err := l.reloadConfig(ctx, b); err != nil { if err := ld.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error loading initial config file %q: %w", path, err) return fmt.Errorf("error loading initial config file %q: %w", path, err)
} }
@ -163,14 +163,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
if len(b) == 0 { if len(b) == 0 {
continue continue
} }
if err := l.reloadConfig(ctx, b); err != nil { if err := ld.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error reloading config file %q: %v", path, err) return fmt.Errorf("error reloading config file %q: %v", path, err)
} }
} }
} }
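
The watcher-or-ticker fallback above, compressed into a sketch against the same fsnotify dependency: if a watcher cannot be created, a 5-second poll takes over, and since a nil channel never fires in a select, one loop serves both modes. The real code watches the file's parent directory, which survives atomic rename-style updates; this sketch watches the path directly for brevity.

    package watchexample

    import (
        "log"
        "time"

        "github.com/fsnotify/fsnotify"
    )

    func watch(path string, reload func()) error {
        var tickChan <-chan time.Time
        var eventChan <-chan fsnotify.Event
        if w, err := fsnotify.NewWatcher(); err != nil {
            log.Printf("fsnotify unavailable, polling %q every 5s: %v", path, err)
            ticker := time.NewTicker(5 * time.Second)
            defer ticker.Stop()
            tickChan = ticker.C
        } else {
            defer w.Close()
            if err := w.Add(path); err != nil {
                return err
            }
            eventChan = w.Events
        }
        for {
            select {
            case <-tickChan: // polling mode
                reload()
            case <-eventChan: // fsnotify mode
                reload()
            }
        }
    }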
func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
secrets := l.client.Secrets(secretNamespace) secrets := ld.client.Secrets(secretNamespace)
w, err := secrets.Watch(ctx, metav1.ListOptions{ w, err := secrets.Watch(ctx, metav1.ListOptions{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Secret", Kind: "Secret",
@ -198,11 +198,11 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) return fmt.Errorf("failed to get config Secret %q: %w", secretName, err)
} }
if err := l.configFromSecret(ctx, secret); err != nil { if err := ld.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error loading initial config: %w", err) return fmt.Errorf("error loading initial config: %w", err)
} }
l.logger.Infof("Watching config Secret %q for changes", secretName) ld.logger.Infof("Watching config Secret %q for changes", secretName)
for { for {
var secret *corev1.Secret var secret *corev1.Secret
select { select {
@ -237,7 +237,7 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
if secret == nil || secret.Data == nil { if secret == nil || secret.Data == nil {
continue continue
} }
if err := l.configFromSecret(ctx, secret); err != nil { if err := ld.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err)
} }
case watch.Error: case watch.Error:
@ -250,13 +250,13 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
} }
} }
func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
b := s.Data[kubetypes.KubeAPIServerConfigFile] b := s.Data[kubetypes.KubeAPIServerConfigFile]
if len(b) == 0 { if len(b) == 0 {
return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile)
} }
if err := l.reloadConfig(ctx, b); err != nil { if err := ld.reloadConfig(ctx, b); err != nil {
return err return err
} }

@ -125,15 +125,15 @@ func TestWatchConfig(t *testing.T) {
} }
} }
configChan := make(chan *conf.Config) configChan := make(chan *conf.Config)
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
l.cfgIgnored = make(chan struct{}) loader.cfgIgnored = make(chan struct{})
errs := make(chan error) errs := make(chan error)
ctx, cancel := context.WithCancel(t.Context()) ctx, cancel := context.WithCancel(t.Context())
defer cancel() defer cancel()
writeFile(t, tc.initialConfig) writeFile(t, tc.initialConfig)
go func() { go func() {
errs <- l.WatchConfig(ctx, cfgPath) errs <- loader.WatchConfig(ctx, cfgPath)
}() }()
for i, p := range tc.phases { for i, p := range tc.phases {
@ -159,7 +159,7 @@ func TestWatchConfig(t *testing.T) {
} else if !strings.Contains(err.Error(), p.expectedErr) { } else if !strings.Contains(err.Error(), p.expectedErr) {
t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error())
} }
case <-l.cfgIgnored: case <-loader.cfgIgnored:
if p.expectedConf != nil { if p.expectedConf != nil {
t.Fatalf("expected config to be reloaded, but got ignored signal") t.Fatalf("expected config to be reloaded, but got ignored signal")
} }
@ -192,13 +192,13 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
}) })
configChan := make(chan *conf.Config) configChan := make(chan *conf.Config)
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
mustCreateOrUpdate(t, cl, secretFrom(expected[0])) mustCreateOrUpdate(t, cl, secretFrom(expected[0]))
errs := make(chan error) errs := make(chan error)
go func() { go func() {
errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret") errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret")
}() }()
for i := range 2 { for i := range 2 {
@ -212,7 +212,7 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
} }
case err := <-errs: case err := <-errs:
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
case <-l.cfgIgnored: case <-loader.cfgIgnored:
t.Fatalf("expected config to be reloaded, but got ignored signal") t.Fatalf("expected config to be reloaded, but got ignored signal")
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for expected event") t.Fatalf("timed out waiting for expected event")

@ -422,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string,
} }
// Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. // Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state.
func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any {
var c tsconsensus.Command var c tsconsensus.Command
if err := json.Unmarshal(l.Data, &c); err != nil { if err := json.Unmarshal(lg.Data, &c); err != nil {
panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error()))
} }
switch c.Name { switch c.Name {

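Apply above follows the standard hashicorp/raft FSM shape. A minimal standalone version of the decode-and-dispatch step: the Command fields are assumptions rather than tsconsensus's actual type, Snapshot and Restore are omitted, and errors are returned instead of the panic used above.

    package raftexample

    import (
        "encoding/json"
        "fmt"

        "github.com/hashicorp/raft"
    )

    // Command is an assumed envelope: a name selecting the operation plus
    // raw, operation-specific arguments.
    type Command struct {
        Name string          `json:"name"`
        Args json.RawMessage `json:"args"`
    }

    type counterFSM struct{ n int }

    // Apply decodes one committed log entry and applies it to local state.
    func (f *counterFSM) Apply(lg *raft.Log) any {
        var c Command
        if err := json.Unmarshal(lg.Data, &c); err != nil {
            return fmt.Errorf("unmarshal command: %w", err)
        }
        switch c.Name {
        case "increment":
            f.n++
            return f.n
        default:
            return fmt.Errorf("unknown command %q", c.Name)
        }
    }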
@ -156,13 +156,13 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) {
client, _, _ := startNode(t, ctx, controlURL, "client") client, _, _ := startNode(t, ctx, controlURL, "client")
// Make sure that the sni node has received its config. // Make sure that the sni node has received its config.
l, err := sni.LocalClient() lc, err := sni.LocalClient()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
gotConfigured := false gotConfigured := false
for range 100 { for range 100 {
s, err := l.StatusWithoutPeers(ctx) s, err := lc.StatusWithoutPeers(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

@ -135,18 +135,18 @@ type lportsPool struct {
ports []int ports []int
} }
func (l *lportsPool) get() int { func (pl *lportsPool) get() int {
l.Lock() pl.Lock()
defer l.Unlock() defer pl.Unlock()
ret := l.ports[0] ret := pl.ports[0]
l.ports = append(l.ports[:0], l.ports[1:]...) pl.ports = append(pl.ports[:0], pl.ports[1:]...)
return ret return ret
} }
func (l *lportsPool) put(i int) { func (pl *lportsPool) put(i int) {
l.Lock() pl.Lock()
defer l.Unlock() defer pl.Unlock()
l.ports = append(l.ports, int(i)) pl.ports = append(pl.ports, int(i))
} }
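
One property of this pool worth spelling out: get pops from the front while put appends to the back, so a just-released port queues behind every other free port before being handed out again. A standalone demo of that FIFO recycling:

    package main

    import (
        "fmt"
        "sync"
    )

    type portPool struct {
        sync.Mutex
        ports []int
    }

    func (p *portPool) get() int {
        p.Lock()
        defer p.Unlock()
        ret := p.ports[0]
        p.ports = append(p.ports[:0], p.ports[1:]...)
        return ret
    }

    func (p *portPool) put(i int) {
        p.Lock()
        defer p.Unlock()
        p.ports = append(p.ports, i)
    }

    func main() {
        p := &portPool{ports: []int{5000, 5001, 5002}}
        first := p.get() // 5000
        p.put(first)     // 5000 rejoins at the back of the queue
        fmt.Println(p.get(), p.get(), p.get()) // 5001 5002 5000
    }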
var ( var (
@ -173,19 +173,19 @@ func init() {
// measure dial time. // measure dial time.
type lportForTCPConn int type lportForTCPConn int
func (l *lportForTCPConn) Close() error { func (lp *lportForTCPConn) Close() error {
if *l == 0 { if *lp == 0 {
return nil return nil
} }
lports.put(int(*l)) lports.put(int(*lp))
return nil return nil
} }
func (l *lportForTCPConn) Write([]byte) (int, error) { func (lp *lportForTCPConn) Write([]byte) (int, error) {
return 0, errors.New("unimplemented") return 0, errors.New("unimplemented")
} }
func (l *lportForTCPConn) Read([]byte) (int, error) { func (lp *lportForTCPConn) Read([]byte) (int, error) {
return 0, errors.New("unimplemented") return 0, errors.New("unimplemented")
} }

@ -65,9 +65,9 @@ func main() {
} }
add, remove := diffTags(stags, dtags) add, remove := diffTags(stags, dtags)
if l := len(add); l > 0 { if ln := len(add); ln > 0 {
log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", ")) log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", "))
if *max > 0 && l > *max { if *max > 0 && ln > *max {
log.Printf("Limiting sync to %d tags", *max) log.Printf("Limiting sync to %d tags", *max)
add = add[:*max] add = add[:*max]
} }

@ -75,8 +75,8 @@ func peerInfo(peer *ipnstate.TKAPeer) string {
// print prints a message about a node key signature and a re-signing command if needed. // print prints a message about a node key signature and a re-signing command if needed.
func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) { func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) {
if l := chainLength(sig); l > *maxRotations { if ln := chainLength(sig); ln > *maxRotations {
log.Printf("%s: chain length %d, printing command to re-sign", info, l) log.Printf("%s: chain length %d, printing command to re-sign", info, ln)
wrapping, _ := sig.UnverifiedWrappingPublic() wrapping, _ := sig.UnverifiedWrappingPublic()
fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString()) fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString())
} else { } else {

@ -25,12 +25,12 @@ func newConnListener() *connListener {
} }
} }
func (l *connListener) Accept() (net.Conn, error) { func (ln *connListener) Accept() (net.Conn, error) {
select { select {
case <-l.closedCh: case <-ln.closedCh:
// TODO(oxtoacart): make this error match what a regular net.Listener does // TODO(oxtoacart): make this error match what a regular net.Listener does
return nil, syscall.EINVAL return nil, syscall.EINVAL
case conn := <-l.ch: case conn := <-ln.ch:
return conn, nil return conn, nil
} }
} }
@ -38,32 +38,32 @@ func (l *connListener) Accept() (net.Conn, error) {
// Addr implements net.Listener. This always returns nil. It is assumed that // Addr implements net.Listener. This always returns nil. It is assumed that
// this method is currently unused, so it logs a warning if it ever does get // this method is currently unused, so it logs a warning if it ever does get
// called. // called.
func (l *connListener) Addr() net.Addr { func (ln *connListener) Addr() net.Addr {
log.Println("warning: unexpected call to connListener.Addr()") log.Println("warning: unexpected call to connListener.Addr()")
return nil return nil
} }
func (l *connListener) Close() error { func (ln *connListener) Close() error {
l.closeMu.Lock() ln.closeMu.Lock()
defer l.closeMu.Unlock() defer ln.closeMu.Unlock()
select { select {
case <-l.closedCh: case <-ln.closedCh:
// Already closed. // Already closed.
return syscall.EINVAL return syscall.EINVAL
default: default:
// We don't close l.ch because someone may be trying to send to that, // We don't close ln.ch because someone may be trying to send to that,
// which would cause a panic. // which would cause a panic.
close(l.closedCh) close(ln.closedCh)
return nil return nil
} }
} }
func (l *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { func (ln *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error {
select { select {
case <-l.closedCh: case <-ln.closedCh:
return syscall.EINVAL return syscall.EINVAL
case l.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: case ln.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}:
// Connection has been accepted. // Connection has been accepted.
} }
return nil return nil
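
How a connListener is driven, as a hedged usage fragment rather than runnable code; incoming and handler are invented names, not identifiers from this package. Connections produced by some custom transport are pushed in with HandleConn and surface through Accept, so a stock http.Server can sit on top.

    // Hypothetical wiring; newConnListener is the constructor above.
    ln := newConnListener()
    go func() {
        for conn := range incoming { // incoming: an assumed chan net.Conn
            if err := ln.HandleConn(conn, conn.RemoteAddr()); err != nil {
                return // syscall.EINVAL: the listener has been closed
            }
        }
    }()
    err := (&http.Server{Handler: handler}).Serve(ln) // handler is assumed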

@ -10,20 +10,20 @@ import (
) )
func TestConnListener(t *testing.T) { func TestConnListener(t *testing.T) {
l, err := net.Listen("tcp", "127.0.0.1:") ln, err := net.Listen("tcp", "127.0.0.1:")
if err != nil { if err != nil {
t.Fatalf("failed to Listen: %s", err) t.Fatalf("failed to Listen: %s", err)
} }
cl := newConnListener() cl := newConnListener()
// Test that we can accept a connection // Test that we can accept a connection
cc, err := net.Dial("tcp", l.Addr().String()) cc, err := net.Dial("tcp", ln.Addr().String())
if err != nil { if err != nil {
t.Fatalf("failed to Dial: %s", err) t.Fatalf("failed to Dial: %s", err)
} }
defer cc.Close() defer cc.Close()
sc, err := l.Accept() sc, err := ln.Accept()
if err != nil { if err != nil {
t.Fatalf("failed to Accept: %s", err) t.Fatalf("failed to Accept: %s", err)
} }

@ -467,14 +467,14 @@ func newSystem(t *testing.T) *system {
tstest.ResourceCheck(t) tstest.ResourceCheck(t)
fs := newFileSystemForLocal(log.Printf, nil) fs := newFileSystemForLocal(log.Printf, nil)
l, err := net.Listen("tcp", "127.0.0.1:0") ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil { if err != nil {
t.Fatalf("failed to Listen: %s", err) t.Fatalf("failed to Listen: %s", err)
} }
t.Logf("FileSystemForLocal listening at %s", l.Addr()) t.Logf("FileSystemForLocal listening at %s", ln.Addr())
go func() { go func() {
for { for {
conn, err := l.Accept() conn, err := ln.Accept()
if err != nil { if err != nil {
t.Logf("Accept: %v", err) t.Logf("Accept: %v", err)
return return
@ -483,11 +483,11 @@ func newSystem(t *testing.T) *system {
} }
}() }()
client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", l.Addr()), &noopAuthorizer{}) client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", ln.Addr()), &noopAuthorizer{})
client.SetTransport(&http.Transport{DisableKeepAlives: true}) client.SetTransport(&http.Transport{DisableKeepAlives: true})
s := &system{ s := &system{
t: t, t: t,
local: &local{l: l, fs: fs}, local: &local{l: ln, fs: fs},
client: client, client: client,
remotes: make(map[string]*remote), remotes: make(map[string]*remote),
} }
@ -496,11 +496,11 @@ func newSystem(t *testing.T) *system {
} }
func (s *system) addRemote(name string) string { func (s *system) addRemote(name string) string {
l, err := net.Listen("tcp", "127.0.0.1:0") ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil { if err != nil {
s.t.Fatalf("failed to Listen: %s", err) s.t.Fatalf("failed to Listen: %s", err)
} }
s.t.Logf("Remote for %v listening at %s", name, l.Addr()) s.t.Logf("Remote for %v listening at %s", name, ln.Addr())
fileServer, err := NewFileServer() fileServer, err := NewFileServer()
if err != nil { if err != nil {
@ -510,14 +510,14 @@ func (s *system) addRemote(name string) string {
s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr()) s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr())
r := &remote{ r := &remote{
l: l, l: ln,
fileServer: fileServer, fileServer: fileServer,
fs: NewFileSystemForRemote(log.Printf), fs: NewFileSystemForRemote(log.Printf),
shares: make(map[string]string), shares: make(map[string]string),
permissions: make(map[string]drive.Permission), permissions: make(map[string]drive.Permission),
} }
r.fs.SetFileServerAddr(fileServer.Addr()) r.fs.SetFileServerAddr(fileServer.Addr())
go http.Serve(l, r) go http.Serve(ln, r)
s.remotes[name] = r s.remotes[name] = r
remotes := make([]*drive.Remote, 0, len(s.remotes)) remotes := make([]*drive.Remote, 0, len(s.remotes))

@ -20,7 +20,7 @@ import (
// It's typically used in a separate process from the actual Taildrive server to // It's typically used in a separate process from the actual Taildrive server to
// serve up files as an unprivileged user. // serve up files as an unprivileged user.
type FileServer struct { type FileServer struct {
l net.Listener ln net.Listener
secretToken string secretToken string
shareHandlers map[string]http.Handler shareHandlers map[string]http.Handler
sharesMu sync.RWMutex sharesMu sync.RWMutex
@ -41,10 +41,10 @@ type FileServer struct {
// called. // called.
func NewFileServer() (*FileServer, error) { func NewFileServer() (*FileServer, error) {
// path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String())) // path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String()))
// l, err := safesocket.Listen(path) // ln, err := safesocket.Listen(path)
// if err != nil { // if err != nil {
// TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???) // TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???)
l, err := net.Listen("tcp", "127.0.0.1:0") ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil { if err != nil {
return nil, fmt.Errorf("listen: %w", err) return nil, fmt.Errorf("listen: %w", err)
} }
@ -55,7 +55,7 @@ func NewFileServer() (*FileServer, error) {
} }
return &FileServer{ return &FileServer{
l: l, ln: ln,
secretToken: secretToken, secretToken: secretToken,
shareHandlers: make(map[string]http.Handler), shareHandlers: make(map[string]http.Handler),
}, nil }, nil
@ -74,12 +74,12 @@ func generateSecretToken() (string, error) {
// Addr returns the address at which this FileServer is listening. This // Addr returns the address at which this FileServer is listening. This
// includes the secret token in front of the address, delimited by a pipe |. // includes the secret token in front of the address, delimited by a pipe |.
func (s *FileServer) Addr() string { func (s *FileServer) Addr() string {
return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String()) return fmt.Sprintf("%s|%s", s.secretToken, s.ln.Addr().String())
} }
// Serve() starts serving files and blocks until it encounters a fatal error. // Serve() starts serving files and blocks until it encounters a fatal error.
func (s *FileServer) Serve() error { func (s *FileServer) Serve() error {
return http.Serve(s.l, s) return http.Serve(s.ln, s)
} }
// LockShares locks the map of shares in preparation for manipulating it. // LockShares locks the map of shares in preparation for manipulating it.
@ -162,5 +162,5 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
func (s *FileServer) Close() error { func (s *FileServer) Close() error {
return s.l.Close() return s.ln.Close()
} }
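
The pipe-delimited composite returned by Addr, parsed from the client side; splitFileServerAddr is a hypothetical helper, not part of this package.

    package main

    import (
        "fmt"
        "strings"
    )

    // splitFileServerAddr undoes FileServer.Addr()'s "secretToken|host:port"
    // encoding.
    func splitFileServerAddr(s string) (token, addr string, err error) {
        token, addr, ok := strings.Cut(s, "|")
        if !ok {
            return "", "", fmt.Errorf("malformed file server address %q", s)
        }
        return token, addr, nil
    }

    func main() {
        tok, addr, err := splitFileServerAddr("s3cr3t|127.0.0.1:49152")
        if err != nil {
            panic(err)
        }
        fmt.Println(tok, addr) // s3cr3t 127.0.0.1:49152
    }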

@ -29,8 +29,8 @@ type logOnce struct {
sync.Once sync.Once
} }
func (l *logOnce) logf(format string, args ...any) { func (lg *logOnce) logf(format string, args ...any) {
l.Once.Do(func() { lg.Once.Do(func() {
log.Printf(format, args...) log.Printf(format, args...)
}) })
} }

@ -266,12 +266,12 @@ func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) {
limit := 50 limit := 50
if limitStr := r.FormValue("limit"); limitStr != "" { if limitStr := r.FormValue("limit"); limitStr != "" {
l, err := strconv.Atoi(limitStr) lm, err := strconv.Atoi(limitStr)
if err != nil { if err != nil {
http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest)
return return
} }
limit = int(l) limit = int(lm)
} }
updates, err := h.b.NetworkLockLog(limit) updates, err := h.b.NetworkLockLog(limit)

@ -352,12 +352,12 @@ type ServiceMonitor struct {
type Labels map[string]LabelValue type Labels map[string]LabelValue
func (l Labels) Parse() map[string]string { func (lb Labels) Parse() map[string]string {
if l == nil { if lb == nil {
return nil return nil
} }
m := make(map[string]string, len(l)) m := make(map[string]string, len(lb))
for k, v := range l { for k, v := range lb {
m[k] = string(v) m[k] = string(v)
} }
return m return m

@ -99,7 +99,7 @@ func Test_conn_Read(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
l := zl.Sugar() log := zl.Sugar()
tc := &fakes.TestConn{} tc := &fakes.TestConn{}
sr := &fakes.TestSessionRecorder{} sr := &fakes.TestSessionRecorder{}
rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar())
@ -110,7 +110,7 @@ func Test_conn_Read(t *testing.T) {
c := &conn{ c := &conn{
ctx: ctx, ctx: ctx,
Conn: tc, Conn: tc,
log: l, log: log,
hasTerm: true, hasTerm: true,
initialCastHeaderSent: make(chan struct{}), initialCastHeaderSent: make(chan struct{}),
rec: rec, rec: rec,

@ -69,12 +69,12 @@ var _ json.Unmarshaler = &PortMaps{}
func (p *PortMaps) UnmarshalJSON(data []byte) error { func (p *PortMaps) UnmarshalJSON(data []byte) error {
*p = make(map[PortMap]struct{}) *p = make(map[PortMap]struct{})
var l []PortMap var v []PortMap
if err := json.Unmarshal(data, &l); err != nil { if err := json.Unmarshal(data, &v); err != nil {
return err return err
} }
for _, pm := range l { for _, pm := range v {
(*p)[pm] = struct{}{} (*p)[pm] = struct{}{}
} }
@ -82,12 +82,12 @@ func (p *PortMaps) UnmarshalJSON(data []byte) error {
} }
func (p PortMaps) MarshalJSON() ([]byte, error) { func (p PortMaps) MarshalJSON() ([]byte, error) {
l := make([]PortMap, 0, len(p)) v := make([]PortMap, 0, len(p))
for pm := range p { for pm := range p {
l = append(l, pm) v = append(v, pm)
} }
return json.Marshal(l) return json.Marshal(v)
} }
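
The set-as-JSON-array technique PortMaps uses, as a standalone round trip over plain strings: marshal the map's keys to a list, rebuild the set on unmarshal.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type set map[string]struct{}

    func (s set) MarshalJSON() ([]byte, error) {
        v := make([]string, 0, len(s))
        for k := range s {
            v = append(v, k)
        }
        return json.Marshal(v)
    }

    func (s *set) UnmarshalJSON(data []byte) error {
        var v []string
        if err := json.Unmarshal(data, &v); err != nil {
            return err
        }
        *s = make(set, len(v))
        for _, k := range v {
            (*s)[k] = struct{}{}
        }
        return nil
    }

    func main() {
        b, _ := json.Marshal(set{"80:80": {}, "443:443": {}})
        fmt.Println(string(b)) // e.g. ["443:443","80:80"]; map order is random

        var s set
        _ = json.Unmarshal(b, &s)
        fmt.Println(len(s)) // 2
    }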
// Status represents the currently configured firewall rules for all egress // Status represents the currently configured firewall rules for all egress

@ -40,10 +40,10 @@ type localClient struct {
lc *local.Client lc *local.Client
} }
func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) {
return l.lc.WatchIPNBus(ctx, mask) return lc.lc.WatchIPNBus(ctx, mask)
} }
func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { func (lc *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) {
return l.lc.CertPair(ctx, domain) return lc.lc.CertPair(ctx, domain)
} }

@ -146,33 +146,33 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne
// SetLoggingEnabled enables or disables logging. // SetLoggingEnabled enables or disables logging.
// When disabled, socket stats are not polled and no new logs are written to disk. // When disabled, socket stats are not polled and no new logs are written to disk.
// Existing logs can still be fetched via the C2N API. // Existing logs can still be fetched via the C2N API.
func (l *Logger) SetLoggingEnabled(v bool) { func (lg *Logger) SetLoggingEnabled(v bool) {
old := l.enabled.Load() old := lg.enabled.Load()
if old != v && l.enabled.CompareAndSwap(old, v) { if old != v && lg.enabled.CompareAndSwap(old, v) {
if v { if v {
if l.eventCh == nil { if lg.eventCh == nil {
// eventCh should be large enough for the number of events that will occur within logInterval. // eventCh should be large enough for the number of events that will occur within logInterval.
// Add an extra second's worth of events to ensure we don't drop any. // Add an extra second's worth of events to ensure we don't drop any.
l.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) lg.eventCh = make(chan event, (logInterval+time.Second)/pollInterval)
} }
l.ctx, l.cancelFn = context.WithCancel(context.Background()) lg.ctx, lg.cancelFn = context.WithCancel(context.Background())
go l.poll() go lg.poll()
go l.logEvents() go lg.logEvents()
} else { } else {
l.cancelFn() lg.cancelFn()
} }
} }
} }
func (l *Logger) Write(p []byte) (int, error) { func (lg *Logger) Write(p []byte) (int, error) {
return l.logger.Write(p) return lg.logger.Write(p)
} }
// poll fetches the current socket stats at the configured time interval, // poll fetches the current socket stats at the configured time interval,
// calculates the delta since the last poll, // calculates the delta since the last poll,
// and writes any non-zero values to the logger event channel. // and writes any non-zero values to the logger event channel.
// This method does not return. // This method does not return.
func (l *Logger) poll() { func (lg *Logger) poll() {
// last is the last set of socket stats we saw. // last is the last set of socket stats we saw.
var lastStats *sockstats.SockStats var lastStats *sockstats.SockStats
var lastTime time.Time var lastTime time.Time
@ -180,7 +180,7 @@ func (l *Logger) poll() {
ticker := time.NewTicker(pollInterval) ticker := time.NewTicker(pollInterval)
for { for {
select { select {
case <-l.ctx.Done(): case <-lg.ctx.Done():
ticker.Stop() ticker.Stop()
return return
case t := <-ticker.C: case t := <-ticker.C:
@ -196,7 +196,7 @@ func (l *Logger) poll() {
if stats.CurrentInterfaceCellular { if stats.CurrentInterfaceCellular {
e.IsCellularInterface = 1 e.IsCellularInterface = 1
} }
l.eventCh <- e lg.eventCh <- e
} }
} }
lastTime = t lastTime = t
@ -207,14 +207,14 @@ func (l *Logger) poll() {
// logEvents reads events from the event channel at logInterval and logs them to disk. // logEvents reads events from the event channel at logInterval and logs them to disk.
// This method does not return. // This method does not return.
func (l *Logger) logEvents() { func (lg *Logger) logEvents() {
enc := json.NewEncoder(l) enc := json.NewEncoder(lg)
flush := func() { flush := func() {
for { for {
select { select {
case e := <-l.eventCh: case e := <-lg.eventCh:
if err := enc.Encode(e); err != nil { if err := enc.Encode(e); err != nil {
l.logf("sockstatlog: error encoding log: %v", err) lg.logf("sockstatlog: error encoding log: %v", err)
} }
default: default:
return return
@ -224,7 +224,7 @@ func (l *Logger) logEvents() {
ticker := time.NewTicker(logInterval) ticker := time.NewTicker(logInterval)
for { for {
select { select {
case <-l.ctx.Done(): case <-lg.ctx.Done():
ticker.Stop() ticker.Stop()
return return
case <-ticker.C: case <-ticker.C:
@ -233,29 +233,29 @@ func (l *Logger) logEvents() {
} }
} }
func (l *Logger) LogID() string { func (lg *Logger) LogID() string {
if l.logger == nil { if lg.logger == nil {
return "" return ""
} }
return l.logger.PrivateID().Public().String() return lg.logger.PrivateID().Public().String()
} }
// Flush sends pending logs to the log server and flushes them from the local buffer. // Flush sends pending logs to the log server and flushes them from the local buffer.
func (l *Logger) Flush() { func (lg *Logger) Flush() {
l.logger.StartFlush() lg.logger.StartFlush()
} }
func (l *Logger) Shutdown(ctx context.Context) { func (lg *Logger) Shutdown(ctx context.Context) {
if l.cancelFn != nil { if lg.cancelFn != nil {
l.cancelFn() lg.cancelFn()
} }
l.filch.Close() lg.filch.Close()
l.logger.Shutdown(ctx) lg.logger.Shutdown(ctx)
type closeIdler interface { type closeIdler interface {
CloseIdleConnections() CloseIdleConnections()
} }
if tr, ok := l.tr.(closeIdler); ok { if tr, ok := lg.tr.(closeIdler); ok {
tr.CloseIdleConnections() tr.CloseIdleConnections()
} }
} }

@ -193,8 +193,8 @@ type logWriter struct {
logger *log.Logger logger *log.Logger
} }
func (l logWriter) Write(buf []byte) (int, error) { func (lg logWriter) Write(buf []byte) (int, error) {
l.logger.Printf("%s", buf) lg.logger.Printf("%s", buf)
return len(buf), nil return len(buf), nil
} }

@ -100,7 +100,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger {
if !cfg.CopyPrivateID.IsZero() { if !cfg.CopyPrivateID.IsZero() {
urlSuffix = "?copyId=" + cfg.CopyPrivateID.String() urlSuffix = "?copyId=" + cfg.CopyPrivateID.String()
} }
l := &Logger{ logger := &Logger{
privateID: cfg.PrivateID, privateID: cfg.PrivateID,
stderr: cfg.Stderr, stderr: cfg.Stderr,
stderrLevel: int64(cfg.StderrLevel), stderrLevel: int64(cfg.StderrLevel),
@ -124,19 +124,19 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger {
} }
if cfg.Bus != nil { if cfg.Bus != nil {
l.eventClient = cfg.Bus.Client("logtail.Logger") logger.eventClient = cfg.Bus.Client("logtail.Logger")
// Subscribe to change deltas from NetMon to detect when the network comes up. // Subscribe to change deltas from NetMon to detect when the network comes up.
eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta) eventbus.SubscribeFunc(logger.eventClient, logger.onChangeDelta)
} }
l.SetSockstatsLabel(sockstats.LabelLogtailLogger) logger.SetSockstatsLabel(sockstats.LabelLogtailLogger)
l.compressLogs = cfg.CompressLogs logger.compressLogs = cfg.CompressLogs
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
l.uploadCancel = cancel logger.uploadCancel = cancel
go l.uploading(ctx) go logger.uploading(ctx)
l.Write([]byte("logtail started")) logger.Write([]byte("logtail started"))
return l return logger
} }
// Logger writes logs, splitting them as configured between local // Logger writes logs, splitting them as configured between local
@ -190,27 +190,27 @@ func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(la
// SetVerbosityLevel controls the verbosity level that should be // SetVerbosityLevel controls the verbosity level that should be
// written to stderr. 0 is the default (not verbose). Levels 1 or higher // written to stderr. 0 is the default (not verbose). Levels 1 or higher
// are increasingly verbose. // are increasingly verbose.
func (l *Logger) SetVerbosityLevel(level int) { func (lg *Logger) SetVerbosityLevel(level int) {
atomic.StoreInt64(&l.stderrLevel, int64(level)) atomic.StoreInt64(&lg.stderrLevel, int64(level))
} }
// SetNetMon sets the network monitor. // SetNetMon sets the network monitor.
// //
// It should not be changed concurrently with log writes and should // It should not be changed concurrently with log writes and should
// only be set once. // only be set once.
func (l *Logger) SetNetMon(lm *netmon.Monitor) { func (lg *Logger) SetNetMon(lm *netmon.Monitor) {
l.netMonitor = lm lg.netMonitor = lm
} }
// SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger. // SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger.
func (l *Logger) SetSockstatsLabel(label sockstats.Label) { func (lg *Logger) SetSockstatsLabel(label sockstats.Label) {
l.sockstatsLabel.Store(label) lg.sockstatsLabel.Store(label)
} }
// PrivateID returns the logger's private log ID. // PrivateID returns the logger's private log ID.
// //
// It exists for internal use only. // It exists for internal use only.
func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } func (lg *Logger) PrivateID() logid.PrivateID { return lg.privateID }
// Shutdown gracefully shuts down the logger while completing any // Shutdown gracefully shuts down the logger while completing any
// remaining uploads. // remaining uploads.
@ -218,33 +218,33 @@ func (l *Logger) PrivateID() logid.PrivateID { return l.privateID }
// It will block, continuing to try and upload unless the passed // It will block, continuing to try and upload unless the passed
// context object interrupts it by being done. // context object interrupts it by being done.
// If the shutdown is interrupted, an error is returned. // If the shutdown is interrupted, an error is returned.
func (l *Logger) Shutdown(ctx context.Context) error { func (lg *Logger) Shutdown(ctx context.Context) error {
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
l.uploadCancel() lg.uploadCancel()
<-l.shutdownDone <-lg.shutdownDone
case <-l.shutdownDone: case <-lg.shutdownDone:
} }
close(done) close(done)
l.httpc.CloseIdleConnections() lg.httpc.CloseIdleConnections()
}() }()
if l.eventClient != nil { if lg.eventClient != nil {
l.eventClient.Close() lg.eventClient.Close()
} }
l.shutdownStartMu.Lock() lg.shutdownStartMu.Lock()
select { select {
case <-l.shutdownStart: case <-lg.shutdownStart:
l.shutdownStartMu.Unlock() lg.shutdownStartMu.Unlock()
return nil return nil
default: default:
} }
close(l.shutdownStart) close(lg.shutdownStart)
l.shutdownStartMu.Unlock() lg.shutdownStartMu.Unlock()
io.WriteString(l, "logger closing down\n") io.WriteString(lg, "logger closing down\n")
<-done <-done
return nil return nil
@ -254,8 +254,8 @@ func (l *Logger) Shutdown(ctx context.Context) error {
// process, and any associated goroutines. // process, and any associated goroutines.
// //
// Deprecated: use Shutdown // Deprecated: use Shutdown
func (l *Logger) Close() { func (lg *Logger) Close() {
l.Shutdown(context.Background()) lg.Shutdown(context.Background())
} }
// drainBlock is called by drainPending when there are no logs to drain. // drainBlock is called by drainPending when there are no logs to drain.
@ -265,11 +265,11 @@ func (l *Logger) Close() {
// //
// If the caller specified FlushInterface, drainWake is only sent to // If the caller specified FlushInterface, drainWake is only sent to
// periodically. // periodically.
func (l *Logger) drainBlock() (shuttingDown bool) { func (lg *Logger) drainBlock() (shuttingDown bool) {
select { select {
case <-l.shutdownStart: case <-lg.shutdownStart:
return true return true
case <-l.drainWake: case <-lg.drainWake:
} }
return false return false
} }
@ -277,20 +277,20 @@ func (l *Logger) drainBlock() (shuttingDown bool) {
// drainPending drains and encodes a batch of logs from the buffer for upload. // drainPending drains and encodes a batch of logs from the buffer for upload.
// If no logs are available, drainPending blocks until logs are available. // If no logs are available, drainPending blocks until logs are available.
// The returned buffer is only valid until the next call to drainPending. // The returned buffer is only valid until the next call to drainPending.
func (l *Logger) drainPending() (b []byte) { func (lg *Logger) drainPending() (b []byte) {
b = l.drainBuf[:0] b = lg.drainBuf[:0]
b = append(b, '[') b = append(b, '[')
defer func() { defer func() {
b = bytes.TrimRight(b, ",") b = bytes.TrimRight(b, ",")
b = append(b, ']') b = append(b, ']')
l.drainBuf = b lg.drainBuf = b
if len(b) <= len("[]") { if len(b) <= len("[]") {
b = nil b = nil
} }
}() }()
maxLen := cmp.Or(l.maxUploadSize, maxSize) maxLen := cmp.Or(lg.maxUploadSize, maxSize)
if l.lowMem { if lg.lowMem {
// When operating in a low memory environment, it is better to upload // When operating in a low memory environment, it is better to upload
// in multiple operations than it is to allocate a large body and OOM. // in multiple operations than it is to allocate a large body and OOM.
// Even if maxLen is less than maxSize, we can still upload an entry // Even if maxLen is less than maxSize, we can still upload an entry
@ -298,13 +298,13 @@ func (l *Logger) drainPending() (b []byte) {
maxLen /= lowMemRatio maxLen /= lowMemRatio
} }
for len(b) < maxLen { for len(b) < maxLen {
line, err := l.buffer.TryReadLine() line, err := lg.buffer.TryReadLine()
switch { switch {
case err == io.EOF: case err == io.EOF:
return b return b
case err != nil: case err != nil:
b = append(b, '{') b = append(b, '{')
b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) b = lg.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0)
b = bytes.TrimRight(b, ",") b = bytes.TrimRight(b, ",")
b = append(b, '}') b = append(b, '}')
return b return b
@ -318,10 +318,10 @@ func (l *Logger) drainPending() (b []byte) {
// in our buffer from a previous large write, let it go. // in our buffer from a previous large write, let it go.
if cap(b) > bufferSize { if cap(b) > bufferSize {
b = bytes.Clone(b) b = bytes.Clone(b)
l.drainBuf = b lg.drainBuf = b
} }
if shuttingDown := l.drainBlock(); shuttingDown { if shuttingDown := lg.drainBlock(); shuttingDown {
return b return b
} }
continue continue
@ -338,18 +338,18 @@ func (l *Logger) drainPending() (b []byte) {
default: default:
// This is probably a log added to stderr by filch // This is probably a log added to stderr by filch
// outside of the logtail logger. Encode it. // outside of the logtail logger. Encode it.
if !l.explainedRaw { if !lg.explainedRaw {
fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n")
fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") fmt.Fprintf(lg.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n")
fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n")
fmt.Fprintf(l.stderr, "RAW-STDERR:\n") fmt.Fprintf(lg.stderr, "RAW-STDERR:\n")
l.explainedRaw = true lg.explainedRaw = true
} }
fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b) fmt.Fprintf(lg.stderr, "RAW-STDERR: %s", b)
// Do not add a client time, as it could be really old. // Do not add a client time, as it could be really old.
// Do not include instance key or ID either, // Do not include instance key or ID either,
// since this came from a different instance. // since this came from a different instance.
b = l.appendText(b, line, true, 0, 0, 0) b = lg.appendText(b, line, true, 0, 0, 0)
} }
b = append(b, ',') b = append(b, ',')
} }
@ -357,14 +357,14 @@ func (l *Logger) drainPending() (b []byte) {
} }
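
The array assembly drainPending performs, in miniature: append entry-comma pairs, trim the trailing comma, close the bracket. With zero entries this collapses to "[]", which is why the deferred cleanup above can test len(b) <= len("[]").

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        b := []byte{'['}
        for _, e := range [][]byte{[]byte(`{"n":1}`), []byte(`{"n":2}`)} {
            b = append(b, e...)
            b = append(b, ',')
        }
        b = bytes.TrimRight(b, ",") // drop the comma after the last entry
        b = append(b, ']')
        fmt.Println(string(b)) // [{"n":1},{"n":2}]
    }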
// This is the goroutine that repeatedly uploads logs in the background. // This is the goroutine that repeatedly uploads logs in the background.
func (l *Logger) uploading(ctx context.Context) { func (lg *Logger) uploading(ctx context.Context) {
defer close(l.shutdownDone) defer close(lg.shutdownDone)
for { for {
body := l.drainPending() body := lg.drainPending()
origlen := -1 // sentinel value: uncompressed origlen := -1 // sentinel value: uncompressed
// Don't attempt to compress tiny bodies; not worth the CPU cycles. // Don't attempt to compress tiny bodies; not worth the CPU cycles.
if l.compressLogs && len(body) > 256 { if lg.compressLogs && len(body) > 256 {
zbody := zstdframe.AppendEncode(nil, body, zbody := zstdframe.AppendEncode(nil, body,
zstdframe.FastestCompression, zstdframe.LowMemory(true)) zstdframe.FastestCompression, zstdframe.LowMemory(true))
@ -381,20 +381,20 @@ func (l *Logger) uploading(ctx context.Context) {
var numFailures int var numFailures int
var firstFailure time.Time var firstFailure time.Time
for len(body) > 0 && ctx.Err() == nil { for len(body) > 0 && ctx.Err() == nil {
retryAfter, err := l.upload(ctx, body, origlen) retryAfter, err := lg.upload(ctx, body, origlen)
if err != nil { if err != nil {
numFailures++ numFailures++
firstFailure = l.clock.Now() firstFailure = lg.clock.Now()
if !l.internetUp() { if !lg.internetUp() {
fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n") fmt.Fprintf(lg.stderr, "logtail: internet down; waiting\n")
l.awaitInternetUp(ctx) lg.awaitInternetUp(ctx)
continue continue
} }
// Only print the same message once. // Only print the same message once.
if currError := err.Error(); lastError != currError { if currError := err.Error(); lastError != currError {
fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err) fmt.Fprintf(lg.stderr, "logtail: upload: %v\n", err)
lastError = currError lastError = currError
} }
@ -407,55 +407,55 @@ func (l *Logger) uploading(ctx context.Context) {
} else { } else {
// Only print a success message after recovery. // Only print a success message after recovery.
if numFailures > 0 { if numFailures > 0 {
fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second)) fmt.Fprintf(lg.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, lg.clock.Since(firstFailure).Round(time.Second))
} }
break break
} }
} }
select { select {
case <-l.shutdownStart: case <-lg.shutdownStart:
return return
default: default:
} }
} }
} }
func (l *Logger) internetUp() bool { func (lg *Logger) internetUp() bool {
select { select {
case <-l.networkIsUp.Ready(): case <-lg.networkIsUp.Ready():
return true return true
default: default:
if l.netMonitor == nil { if lg.netMonitor == nil {
return true // No way to tell, so assume it is. return true // No way to tell, so assume it is.
} }
return l.netMonitor.InterfaceState().AnyInterfaceUp() return lg.netMonitor.InterfaceState().AnyInterfaceUp()
} }
} }
// onChangeDelta is an eventbus subscriber function that handles // onChangeDelta is an eventbus subscriber function that handles
// [netmon.ChangeDelta] events to detect whether the Internet is expected to be // [netmon.ChangeDelta] events to detect whether the Internet is expected to be
// reachable. // reachable.
func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) { func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) {
if delta.New.AnyInterfaceUp() { if delta.New.AnyInterfaceUp() {
fmt.Fprintf(l.stderr, "logtail: internet back up\n") fmt.Fprintf(lg.stderr, "logtail: internet back up\n")
l.networkIsUp.Set() lg.networkIsUp.Set()
} else { } else {
fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n") fmt.Fprintf(lg.stderr, "logtail: network changed, but is not up\n")
l.networkIsUp.Reset() lg.networkIsUp.Reset()
} }
} }
func (l *Logger) awaitInternetUp(ctx context.Context) { func (lg *Logger) awaitInternetUp(ctx context.Context) {
if l.eventClient != nil { if lg.eventClient != nil {
select { select {
case <-l.networkIsUp.Ready(): case <-lg.networkIsUp.Ready():
case <-ctx.Done(): case <-ctx.Done():
} }
return return
} }
upc := make(chan bool, 1) upc := make(chan bool, 1)
defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) {
if delta.New.AnyInterfaceUp() { if delta.New.AnyInterfaceUp() {
select { select {
case upc <- true: case upc <- true:
@ -463,12 +463,12 @@ func (l *Logger) awaitInternetUp(ctx context.Context) {
} }
} }
})() })()
if l.internetUp() { if lg.internetUp() {
return return
} }
select { select {
case <-upc: case <-upc:
fmt.Fprintf(l.stderr, "logtail: internet back up\n") fmt.Fprintf(lg.stderr, "logtail: internet back up\n")
case <-ctx.Done(): case <-ctx.Done():
} }
} }
@ -476,13 +476,13 @@ func (l *Logger) awaitInternetUp(ctx context.Context) {
// upload uploads body to the log server. // upload uploads body to the log server.
// origlen indicates the pre-compression body length. // origlen indicates the pre-compression body length.
// origlen of -1 indicates that the body is not compressed. // origlen of -1 indicates that the body is not compressed.
func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) {
const maxUploadTime = 45 * time.Second const maxUploadTime = 45 * time.Second
ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf) ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf)
ctx, cancel := context.WithTimeout(ctx, maxUploadTime) ctx, cancel := context.WithTimeout(ctx, maxUploadTime)
defer cancel() defer cancel()
req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body)) req, err := http.NewRequestWithContext(ctx, "POST", lg.url, bytes.NewReader(body))
if err != nil { if err != nil {
// I know of no conditions under which this could fail. // I know of no conditions under which this could fail.
// Report it very loudly. // Report it very loudly.
@ -513,8 +513,8 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft
compressedNote = "compressed" compressedNote = "compressed"
} }
l.httpDoCalls.Add(1) lg.httpDoCalls.Add(1)
resp, err := l.httpc.Do(req) resp, err := lg.httpc.Do(req)
if err != nil { if err != nil {
return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err) return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err)
} }
@ -533,16 +533,16 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft
// //
// TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35. // TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35.
// Finish cleaning this up. // Finish cleaning this up.
func (l *Logger) Flush() error { func (lg *Logger) Flush() error {
return nil return nil
} }
// StartFlush starts a log upload, if anything is pending. // StartFlush starts a log upload, if anything is pending.
// //
// If l is nil, StartFlush is a no-op. // If lg is nil, StartFlush is a no-op.
func (l *Logger) StartFlush() { func (lg *Logger) StartFlush() {
if l != nil { if lg != nil {
l.tryDrainWake() lg.tryDrainWake()
} }
} }
@ -558,41 +558,41 @@ var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES")
// tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup. // tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup.
// It does not block. // It does not block.
func (l *Logger) tryDrainWake() { func (lg *Logger) tryDrainWake() {
l.flushPending.Store(false) lg.flushPending.Store(false)
if debugWakesAndUploads() { if debugWakesAndUploads() {
// Using println instead of log.Printf here to avoid recursing back into // Using println instead of log.Printf here to avoid recursing back into
// ourselves. // ourselves.
println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load()) println("logtail: try drain wake, numHTTP:", lg.httpDoCalls.Load())
} }
select { select {
case l.drainWake <- struct{}{}: case lg.drainWake <- struct{}{}:
default: default:
} }
} }
func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { func (lg *Logger) sendLocked(jsonBlob []byte) (int, error) {
tapSend(jsonBlob) tapSend(jsonBlob)
if logtailDisabled.Load() { if logtailDisabled.Load() {
return len(jsonBlob), nil return len(jsonBlob), nil
} }
n, err := l.buffer.Write(jsonBlob) n, err := lg.buffer.Write(jsonBlob)
flushDelay := defaultFlushDelay flushDelay := defaultFlushDelay
if l.flushDelayFn != nil { if lg.flushDelayFn != nil {
flushDelay = l.flushDelayFn() flushDelay = lg.flushDelayFn()
} }
if flushDelay > 0 { if flushDelay > 0 {
if l.flushPending.CompareAndSwap(false, true) { if lg.flushPending.CompareAndSwap(false, true) {
if l.flushTimer == nil { if lg.flushTimer == nil {
l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake) lg.flushTimer = lg.clock.AfterFunc(flushDelay, lg.tryDrainWake)
} else { } else {
l.flushTimer.Reset(flushDelay) lg.flushTimer.Reset(flushDelay)
} }
} }
} else { } else {
l.tryDrainWake() lg.tryDrainWake()
} }
return n, err return n, err
} }
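sendLocked debounces uploads: the first write in a quiet window arms a timer via CompareAndSwap, later writes within flushDelay are absorbed, and the buffered-channel send in tryDrainWake drops wakes that are already pending. A stripped-down sketch of the same pattern, with hypothetical names (not the Logger's own API):

import (
	"sync/atomic"
	"time"
)

// debouncer coalesces bursts of Kick calls into one wake per delay window.
type debouncer struct {
	pending atomic.Bool
	timer   *time.Timer
	wake    chan struct{} // buffered with capacity 1
}

func (d *debouncer) Kick(delay time.Duration) {
	if delay <= 0 {
		d.fire() // no debouncing: wake immediately
		return
	}
	if d.pending.CompareAndSwap(false, true) { // first kick in this window
		if d.timer == nil {
			d.timer = time.AfterFunc(delay, d.fire)
		} else {
			d.timer.Reset(delay)
		}
	}
}

func (d *debouncer) fire() {
	d.pending.Store(false)
	select {
	case d.wake <- struct{}{}: // wake the drain loop
	default: // a wake is already queued; dropping this one is fine
	}
}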
@ -600,13 +600,13 @@ func (l *Logger) sendLocked(jsonBlob []byte) (int, error) {
// appendMetadata appends optional "logtail", "metrics", and "v" JSON members. // appendMetadata appends optional "logtail", "metrics", and "v" JSON members.
// This assumes dst is already within a JSON object. // This assumes dst is already within a JSON object.
// Each member is comma-terminated. // Each member is comma-terminated.
func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { func (lg *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte {
// Append optional logtail metadata. // Append optional logtail metadata.
if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil { if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil {
dst = append(dst, `"logtail":{`...) dst = append(dst, `"logtail":{`...)
if !skipClientTime { if !skipClientTime {
dst = append(dst, `"client_time":"`...) dst = append(dst, `"client_time":"`...)
dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) dst = lg.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano)
dst = append(dst, '"', ',') dst = append(dst, '"', ',')
} }
if procID != 0 { if procID != 0 {
@ -639,8 +639,8 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr
} }
// Append optional metrics metadata. // Append optional metrics metadata.
if !skipMetrics && l.metricsDelta != nil { if !skipMetrics && lg.metricsDelta != nil {
if d := l.metricsDelta(); d != "" { if d := lg.metricsDelta(); d != "" {
dst = append(dst, `"metrics":"`...) dst = append(dst, `"metrics":"`...)
dst = append(dst, d...) dst = append(dst, d...)
dst = append(dst, '"', ',') dst = append(dst, '"', ',')
@ -660,10 +660,10 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr
} }
// appendText appends a raw text message in the Tailscale JSON log entry format. // appendText appends a raw text message in the Tailscale JSON log entry format.
func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { func (lg *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte {
dst = slices.Grow(dst, len(src)) dst = slices.Grow(dst, len(src))
dst = append(dst, '{') dst = append(dst, '{')
dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) dst = lg.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level)
if len(src) == 0 { if len(src) == 0 {
dst = bytes.TrimRight(dst, ",") dst = bytes.TrimRight(dst, ",")
return append(dst, "}\n"...) return append(dst, "}\n"...)
@ -672,7 +672,7 @@ func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32,
// Append the text string, which may be truncated. // Append the text string, which may be truncated.
// Invalid UTF-8 will be mangled with the Unicode replacement character. // Invalid UTF-8 will be mangled with the Unicode replacement character.
max := maxTextSize max := maxTextSize
if l.lowMem { if lg.lowMem {
max /= lowMemRatio max /= lowMemRatio
} }
dst = append(dst, `"text":`...) dst = append(dst, `"text":`...)
@ -697,12 +697,12 @@ func appendTruncatedString(dst, src []byte, n int) []byte {
// appendTextOrJSONLocked appends a raw text message or a raw JSON object // appendTextOrJSONLocked appends a raw text message or a raw JSON object
// in the Tailscale JSON log format. // in the Tailscale JSON log format.
func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { func (lg *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
if l.includeProcSequence { if lg.includeProcSequence {
l.procSequence++ lg.procSequence++
} }
if len(src) == 0 || src[0] != '{' { if len(src) == 0 || src[0] != '{' {
return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level)
} }
// Check whether the input is a valid JSON object and // Check whether the input is a valid JSON object and
@ -714,11 +714,11 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
// However, bytes.NewBuffer normally allocates unless // However, bytes.NewBuffer normally allocates unless
// we immediately shallow copy it into a pre-allocated Buffer struct. // we immediately shallow copy it into a pre-allocated Buffer struct.
// See https://go.dev/issue/67004. // See https://go.dev/issue/67004.
l.bytesBuf = *bytes.NewBuffer(src) lg.bytesBuf = *bytes.NewBuffer(src)
defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src defer func() { lg.bytesBuf = bytes.Buffer{} }() // avoid pinning src
dec := &l.jsonDec dec := &lg.jsonDec
dec.Reset(&l.bytesBuf) dec.Reset(&lg.bytesBuf)
if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil { if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil {
return false return false
} }
@ -750,7 +750,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
// Treat invalid JSON as a raw text message. // Treat invalid JSON as a raw text message.
if !validJSON { if !validJSON {
return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level)
} }
// Check whether the JSON payload is too large. // Check whether the JSON payload is too large.
@ -758,13 +758,13 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
// That's okay as the Tailscale log service limit is actually 2*maxSize. // That's okay as the Tailscale log service limit is actually 2*maxSize.
// However, so long as logging applications aim to target the maxSize limit, // However, so long as logging applications aim to target the maxSize limit,
// there should be no trouble eventually uploading logs. // there should be no trouble eventually uploading logs.
maxLen := cmp.Or(l.maxUploadSize, maxSize) maxLen := cmp.Or(lg.maxUploadSize, maxSize)
if len(src) > maxLen { if len(src) > maxLen {
errDetail := fmt.Sprintf("entry too large: %d bytes", len(src)) errDetail := fmt.Sprintf("entry too large: %d bytes", len(src))
errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size
dst = append(dst, '{') dst = append(dst, '{')
dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level)
dst = bytes.TrimRight(dst, ",") dst = bytes.TrimRight(dst, ",")
return append(dst, "}\n"...) return append(dst, "}\n"...)
} }
@ -781,7 +781,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
} }
dst = slices.Grow(dst, len(src)) dst = slices.Grow(dst, len(src))
dst = append(dst, '{') dst = append(dst, '{')
dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level)
if logtailValLength > 0 { if logtailValLength > 0 {
// Exclude original logtail member from the message. // Exclude original logtail member from the message.
dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset]) dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset])
@ -808,8 +808,8 @@ func appendWithoutNewline(dst, src []byte) []byte {
} }
// Logf logs to l using the provided fmt-style format and optional arguments. // Logf logs to lg using the provided fmt-style format and optional arguments.
func (l *Logger) Logf(format string, args ...any) { func (lg *Logger) Logf(format string, args ...any) {
fmt.Fprintf(l, format, args...) fmt.Fprintf(lg, format, args...)
} }
// Write logs an encoded JSON blob. // Write logs an encoded JSON blob.
@ -818,29 +818,29 @@ func (l *Logger) Logf(format string, args ...any) {
// then contents is fit into a JSON blob and written. // then contents is fit into a JSON blob and written.
// //
// This is intended as an interface for the stdlib "log" package. // This is intended as an interface for the stdlib "log" package.
func (l *Logger) Write(buf []byte) (int, error) { func (lg *Logger) Write(buf []byte) (int, error) {
if len(buf) == 0 { if len(buf) == 0 {
return 0, nil return 0, nil
} }
inLen := len(buf) // length as provided to us, before modifications to downstream writers inLen := len(buf) // length as provided to us, before modifications to downstream writers
level, buf := parseAndRemoveLogLevel(buf) level, buf := parseAndRemoveLogLevel(buf)
if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) { if lg.stderr != nil && lg.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&lg.stderrLevel) {
if buf[len(buf)-1] == '\n' { if buf[len(buf)-1] == '\n' {
l.stderr.Write(buf) lg.stderr.Write(buf)
} else { } else {
// The log package always line-terminates logs, // The log package always line-terminates logs,
// so this is an uncommon path. // so this is an uncommon path.
withNL := append(buf[:len(buf):len(buf)], '\n') withNL := append(buf[:len(buf):len(buf)], '\n')
l.stderr.Write(withNL) lg.stderr.Write(withNL)
} }
} }
l.writeLock.Lock() lg.writeLock.Lock()
defer l.writeLock.Unlock() defer lg.writeLock.Unlock()
b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level) b := lg.appendTextOrJSONLocked(lg.writeBuf[:0], buf, level)
_, err := l.sendLocked(b) _, err := lg.sendLocked(b)
return inLen, err return inLen, err
} }
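Write is the hook that makes Logger a drop-in io.Writer for the stdlib log package, as the comment above notes. A hedged usage sketch; only the Config fields this diff itself exercises are shown, and a real setup (see the tests below) also passes a Bus:

lg := NewLogger(Config{BaseURL: "https://log.example.com"}, log.Printf) // hypothetical endpoint
log.SetOutput(lg) // stdlib log lines now flow through Logger.Write
log.Printf("hello from logtail")
defer lg.Shutdown(context.Background())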
@ -29,11 +29,11 @@ func TestFastShutdown(t *testing.T) {
func(w http.ResponseWriter, r *http.Request) {})) func(w http.ResponseWriter, r *http.Request) {}))
defer testServ.Close() defer testServ.Close()
l := NewLogger(Config{ logger := NewLogger(Config{
BaseURL: testServ.URL, BaseURL: testServ.URL,
Bus: eventbustest.NewBus(t), Bus: eventbustest.NewBus(t),
}, t.Logf) }, t.Logf)
err := l.Shutdown(ctx) err := logger.Shutdown(ctx)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -64,7 +64,7 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) {
t.Cleanup(ts.srv.Close) t.Cleanup(ts.srv.Close)
l := NewLogger(Config{ logger := NewLogger(Config{
BaseURL: ts.srv.URL, BaseURL: ts.srv.URL,
Bus: eventbustest.NewBus(t), Bus: eventbustest.NewBus(t),
}, t.Logf) }, t.Logf)
@ -75,14 +75,14 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) {
t.Errorf("unknown start logging statement: %q", string(body)) t.Errorf("unknown start logging statement: %q", string(body))
} }
return &ts, l return &ts, logger
} }
func TestDrainPendingMessages(t *testing.T) { func TestDrainPendingMessages(t *testing.T) {
ts, l := NewLogtailTestHarness(t) ts, logger := NewLogtailTestHarness(t)
for range logLines { for range logLines {
l.Write([]byte("log line")) logger.Write([]byte("log line"))
} }
// all of the "log line" messages usually arrive at once, but poll if needed. // all of the "log line" messages usually arrive at once, but poll if needed.
@ -96,14 +96,14 @@ func TestDrainPendingMessages(t *testing.T) {
// if we never find count == logLines, the test will eventually time out. // if we never find count == logLines, the test will eventually time out.
} }
err := l.Shutdown(context.Background()) err := logger.Shutdown(context.Background())
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
} }
func TestEncodeAndUploadMessages(t *testing.T) { func TestEncodeAndUploadMessages(t *testing.T) {
ts, l := NewLogtailTestHarness(t) ts, logger := NewLogtailTestHarness(t)
tests := []struct { tests := []struct {
name string name string
@ -123,7 +123,7 @@ func TestEncodeAndUploadMessages(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
io.WriteString(l, tt.log) io.WriteString(logger, tt.log)
body := <-ts.uploaded body := <-ts.uploaded
data := unmarshalOne(t, body) data := unmarshalOne(t, body)
@ -144,7 +144,7 @@ func TestEncodeAndUploadMessages(t *testing.T) {
} }
} }
err := l.Shutdown(context.Background()) err := logger.Shutdown(context.Background())
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -322,9 +322,9 @@ func TestLoggerWriteResult(t *testing.T) {
} }
func TestAppendMetadata(t *testing.T) { func TestAppendMetadata(t *testing.T) {
var l Logger var lg Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" } lg.metricsDelta = func() string { return "metrics" }
for _, tt := range []struct { for _, tt := range []struct {
skipClientTime bool skipClientTime bool
@ -350,7 +350,7 @@ func TestAppendMetadata(t *testing.T) {
{procID: 1, procSeq: 2, errDetail: "error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2, {procID: 1, procSeq: 2, errDetail: "error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2,
want: `"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`}, want: `"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`},
} { } {
got := string(l.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) got := string(lg.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level))
if got != tt.want { if got != tt.want {
t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want) t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want)
} }
@ -362,10 +362,10 @@ func TestAppendMetadata(t *testing.T) {
} }
func TestAppendText(t *testing.T) { func TestAppendText(t *testing.T) {
var l Logger var lg Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" } lg.metricsDelta = func() string { return "metrics" }
l.lowMem = true lg.lowMem = true
for _, tt := range []struct { for _, tt := range []struct {
text string text string
@ -382,7 +382,7 @@ func TestAppendText(t *testing.T) {
{text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`}, {text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`},
{text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `…+1044484"}`}, {text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `…+1044484"}`},
} { } {
got := string(l.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) got := string(lg.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level))
if !strings.HasSuffix(got, "\n") { if !strings.HasSuffix(got, "\n") {
t.Errorf("`%s` does not end with a newline", got) t.Errorf("`%s` does not end with a newline", got)
} }
@ -397,10 +397,10 @@ func TestAppendText(t *testing.T) {
} }
func TestAppendTextOrJSON(t *testing.T) { func TestAppendTextOrJSON(t *testing.T) {
var l Logger var lg Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" } lg.metricsDelta = func() string { return "metrics" }
l.lowMem = true lg.lowMem = true
for _, tt := range []struct { for _, tt := range []struct {
in string in string
@ -419,7 +419,7 @@ func TestAppendTextOrJSON(t *testing.T) {
{in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`}, {in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`},
{in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `…+218465"}}}`}, {in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `…+218465"}}}`},
} { } {
got := string(l.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) got := string(lg.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level))
if !strings.HasSuffix(got, "\n") { if !strings.HasSuffix(got, "\n") {
t.Errorf("`%s` does not end with a newline", got) t.Errorf("`%s` does not end with a newline", got)
} }
@ -461,21 +461,21 @@ var testdataTextLog = []byte(`netcheck: report: udp=true v6=false v6os=true mapv
var testdataJSONLog = []byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`) var testdataJSONLog = []byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`)
func BenchmarkWriteText(b *testing.B) { func BenchmarkWriteText(b *testing.B) {
var l Logger var lg Logger
l.clock = tstime.StdClock{} lg.clock = tstime.StdClock{}
l.buffer = discardBuffer{} lg.buffer = discardBuffer{}
b.ReportAllocs() b.ReportAllocs()
for range b.N { for range b.N {
must.Get(l.Write(testdataTextLog)) must.Get(lg.Write(testdataTextLog))
} }
} }
func BenchmarkWriteJSON(b *testing.B) { func BenchmarkWriteJSON(b *testing.B) {
var l Logger var lg Logger
l.clock = tstime.StdClock{} lg.clock = tstime.StdClock{}
l.buffer = discardBuffer{} lg.buffer = discardBuffer{}
b.ReportAllocs() b.ReportAllocs()
for range b.N { for range b.N {
must.Get(l.Write(testdataJSONLog)) must.Get(lg.Write(testdataJSONLog))
} }
} }
@ -303,21 +303,21 @@ func formatPrefixTable(addr uint8, len int) string {
// //
// For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24". // For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24".
func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix { func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix {
l := parent.Bits() ln := parent.Bits()
if l%8 != 0 { if ln%8 != 0 {
panic("parent prefix is not 8-bit aligned") panic("parent prefix is not 8-bit aligned")
} }
if l >= parent.Addr().BitLen() { if ln >= parent.Addr().BitLen() {
panic("parent prefix cannot be extended further") panic("parent prefix cannot be extended further")
} }
off := l / 8 off := ln / 8
if parent.Addr().Is4() { if parent.Addr().Is4() {
bs := parent.Addr().As4() bs := parent.Addr().As4()
bs[off] = stride bs[off] = stride
return netip.PrefixFrom(netip.AddrFrom4(bs), l+8) return netip.PrefixFrom(netip.AddrFrom4(bs), ln+8)
} else { } else {
bs := parent.Addr().As16() bs := parent.Addr().As16()
bs[off] = stride bs[off] = stride
return netip.PrefixFrom(netip.AddrFrom16(bs), l+8) return netip.PrefixFrom(netip.AddrFrom16(bs), ln+8)
} }
} }
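A quick check of the documented example, assuming fmt and net/netip are imported (sketch only, not part of the change):

parent := netip.MustParsePrefix("192.168.0.0/16")
child := childPrefixOf(parent, 8)
fmt.Println(child) // 192.168.8.0/24: byte ln/8 == 2 becomes the stride, length ln+8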
@ -377,8 +377,8 @@ func pfxMask(pfxLen int) uint8 {
func allPrefixes() []slowEntry[int] { func allPrefixes() []slowEntry[int] {
ret := make([]slowEntry[int], 0, lastHostIndex) ret := make([]slowEntry[int], 0, lastHostIndex)
for i := 1; i < lastHostIndex+1; i++ { for i := 1; i < lastHostIndex+1; i++ {
a, l := inversePrefixIndex(i) a, ln := inversePrefixIndex(i)
ret = append(ret, slowEntry[int]{a, l, i}) ret = append(ret, slowEntry[int]{a, ln, i})
} }
return ret return ret
} }
@ -550,8 +550,8 @@ func genRandomSubdomains(t *testing.T, n int) []dnsname.FQDN {
const charset = "abcdefghijklmnopqrstuvwxyz" const charset = "abcdefghijklmnopqrstuvwxyz"
for len(domains) < cap(domains) { for len(domains) < cap(domains) {
l := r.Intn(19) + 1 ln := r.Intn(19) + 1
b := make([]byte, l) b := make([]byte, ln)
for i := range b { for i := range b {
b[i] = charset[r.Intn(len(charset))] b[i] = charset[r.Intn(len(charset))]
} }
@ -19,11 +19,11 @@ func TestSetUserTimeout(t *testing.T) {
// set in ktimeout.UserTimeout above. // set in ktimeout.UserTimeout above.
lc.SetMultipathTCP(false) lc.SetMultipathTCP(false)
l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) ln := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0"))
defer l.Close() defer ln.Close()
var err error var err error
if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
err = SetUserTimeout(fd, 0) err = SetUserTimeout(fd, 0)
}); e != nil { }); e != nil {
t.Fatal(e) t.Fatal(e)
@ -31,12 +31,12 @@ func TestSetUserTimeout(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
v := must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) v := must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
if v != 0 { if v != 0 {
t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v) t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v)
} }
if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
err = SetUserTimeout(fd, 30*time.Second) err = SetUserTimeout(fd, 30*time.Second)
}); e != nil { }); e != nil {
t.Fatal(e) t.Fatal(e)
@ -44,7 +44,7 @@ func TestSetUserTimeout(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
v = must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) v = must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
if v != 30000 { if v != 30000 {
t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v) t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v)
} }
@ -14,11 +14,11 @@ func ExampleUserTimeout() {
lc := net.ListenConfig{ lc := net.ListenConfig{
Control: UserTimeout(30 * time.Second), Control: UserTimeout(30 * time.Second),
} }
l, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") ln, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0")
if err != nil { if err != nil {
fmt.Printf("error: %v", err) fmt.Printf("error: %v", err)
return return
} }
l.Close() ln.Close()
// Output: // Output:
} }
@ -39,16 +39,16 @@ func Listen(addr string) *Listener {
} }
// Addr implements net.Listener.Addr. // Addr implements net.Listener.Addr.
func (l *Listener) Addr() net.Addr { func (ln *Listener) Addr() net.Addr {
return l.addr return ln.addr
} }
// Close closes the pipe listener. // Close closes the pipe listener.
func (l *Listener) Close() error { func (ln *Listener) Close() error {
var cleanup func() var cleanup func()
l.closeOnce.Do(func() { ln.closeOnce.Do(func() {
cleanup = l.onClose cleanup = ln.onClose
close(l.closed) close(ln.closed)
}) })
if cleanup != nil { if cleanup != nil {
cleanup() cleanup()
@ -57,11 +57,11 @@ func (l *Listener) Close() error {
} }
// Accept blocks until a new connection is available or the listener is closed. // Accept blocks until a new connection is available or the listener is closed.
func (l *Listener) Accept() (net.Conn, error) { func (ln *Listener) Accept() (net.Conn, error) {
select { select {
case c := <-l.ch: case c := <-ln.ch:
return c, nil return c, nil
case <-l.closed: case <-ln.closed:
return nil, net.ErrClosed return nil, net.ErrClosed
} }
} }
@ -70,18 +70,18 @@ func (l *Listener) Accept() (net.Conn, error) {
// The provided Context must be non-nil. If the context expires before the // The provided Context must be non-nil. If the context expires before the
// connection is complete, an error is returned. Once successfully connected // connection is complete, an error is returned. Once successfully connected
// any expiration of the context will not affect the connection. // any expiration of the context will not affect the connection.
func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { func (ln *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) {
if !strings.HasSuffix(network, "tcp") { if !strings.HasSuffix(network, "tcp") {
return nil, net.UnknownNetworkError(network) return nil, net.UnknownNetworkError(network)
} }
if connAddr(addr) != l.addr { if connAddr(addr) != ln.addr {
return nil, &net.AddrError{ return nil, &net.AddrError{
Err: "invalid address", Err: "invalid address",
Addr: addr, Addr: addr,
} }
} }
newConn := l.NewConn newConn := ln.NewConn
if newConn == nil { if newConn == nil {
newConn = func(network, addr string, maxBuf int) (Conn, Conn) { newConn = func(network, addr string, maxBuf int) (Conn, Conn) {
return NewConn(addr, maxBuf) return NewConn(addr, maxBuf)
@ -98,9 +98,9 @@ func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn,
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
case <-l.closed: case <-ln.closed:
return nil, net.ErrClosed return nil, net.ErrClosed
case l.ch <- s: case ln.ch <- s:
return c, nil return c, nil
} }
} }
@ -9,10 +9,10 @@ import (
) )
func TestListener(t *testing.T) { func TestListener(t *testing.T) {
l := Listen("srv.local") ln := Listen("srv.local")
defer l.Close() defer ln.Close()
go func() { go func() {
c, err := l.Accept() c, err := ln.Accept()
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -20,11 +20,11 @@ func TestListener(t *testing.T) {
defer c.Close() defer c.Close()
}() }()
if c, err := l.Dial(context.Background(), "tcp", "invalid"); err == nil { if c, err := ln.Dial(context.Background(), "tcp", "invalid"); err == nil {
c.Close() c.Close()
t.Fatalf("dial to invalid address succeeded") t.Fatalf("dial to invalid address succeeded")
} }
c, err := l.Dial(context.Background(), "tcp", "srv.local") c, err := ln.Dial(context.Background(), "tcp", "srv.local")
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
return return
@ -34,7 +34,7 @@ func FromStdIPNet(std *net.IPNet) (prefix netip.Prefix, ok bool) {
} }
ip = ip.Unmap() ip = ip.Unmap()
if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len { if ln := len(std.Mask); ln != net.IPv4len && ln != net.IPv6len {
// Invalid mask. // Invalid mask.
return netip.Prefix{}, false return netip.Prefix{}, false
} }
@ -993,9 +993,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err) c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
} else { } else {
rs.mu.Lock() rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d) mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d { } else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d rs.report.RegionLatency[reg.RegionID] = d
} }
// We set these IPv4 and IPv6 but they're not really used // We set these IPv4 and IPv6 but they're not really used
@ -1214,9 +1214,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee
} else if ok { } else if ok {
c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d) c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d)
rs.mu.Lock() rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d) mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d { } else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d rs.report.RegionLatency[reg.RegionID] = d
} }
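The same keep-the-minimum update appears in both the HTTPS and ICMP paths above: a region's latency is recorded only when it is the first sample or an improvement. A sketch of the helper both sites could share (hypothetical name, plain map instead of mak.Set, which lazily allocates):

import "time"

// keepMin records d for region id when it is the first sample or lower
// than the stored value. Assumes m is non-nil.
func keepMin(m map[int]time.Duration, id int, d time.Duration) {
	if cur, ok := m[id]; !ok || cur >= d {
		m[id] = d
	}
}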
@ -120,10 +120,10 @@ func (s *Server) logf(format string, args ...any) {
} }
// Serve accepts and handles incoming connections on the given listener. // Serve accepts and handles incoming connections on the given listener.
func (s *Server) Serve(l net.Listener) error { func (s *Server) Serve(ln net.Listener) error {
defer l.Close() defer ln.Close()
for { for {
c, err := l.Accept() c, err := ln.Accept()
if err != nil { if err != nil {
return err return err
} }
@ -17,9 +17,9 @@ import (
// connections and handles each one in a goroutine. Because it runs in an infinite loop, // connections and handles each one in a goroutine. Because it runs in an infinite loop,
// this function only returns if any of the speedtests return with errors, or if the // this function only returns if any of the speedtests return with errors, or if the
// listener is closed. // listener is closed.
func Serve(l net.Listener) error { func Serve(ln net.Listener) error {
for { for {
conn, err := l.Accept() conn, err := ln.Accept()
if errors.Is(err, net.ErrClosed) { if errors.Is(err, net.ErrClosed) {
return nil return nil
} }
@ -21,13 +21,13 @@ func TestDownload(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338")
// start a listener and find the port where the server will be listening. // start a listener and find the port where the server will be listening.
l, err := net.Listen("tcp", ":0") ln, err := net.Listen("tcp", ":0")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
t.Cleanup(func() { l.Close() }) t.Cleanup(func() { ln.Close() })
serverIP := l.Addr().String() serverIP := ln.Addr().String()
t.Log("server IP found:", serverIP) t.Log("server IP found:", serverIP)
type state struct { type state struct {
@ -40,7 +40,7 @@ func TestDownload(t *testing.T) {
stateChan := make(chan state, 1) stateChan := make(chan state, 1)
go func() { go func() {
err := Serve(l) err := Serve(ln)
stateChan <- state{err: err} stateChan <- state{err: err}
}() }()
@ -84,7 +84,7 @@ func TestDownload(t *testing.T) {
}) })
// causes the server goroutine to finish // causes the server goroutine to finish
l.Close() ln.Close()
testState := <-stateChan testState := <-stateChan
if testState.err != nil { if testState.err != nil {
@ -166,14 +166,14 @@ var (
func findArchAndVersion(control []byte) (arch string, version string, err error) { func findArchAndVersion(control []byte) (arch string, version string, err error) {
b := bytes.NewBuffer(control) b := bytes.NewBuffer(control)
for { for {
l, err := b.ReadBytes('\n') ln, err := b.ReadBytes('\n')
if err != nil { if err != nil {
return "", "", err return "", "", err
} }
if bytes.HasPrefix(l, archKey) { if bytes.HasPrefix(ln, archKey) {
arch = string(bytes.TrimSpace(l[len(archKey):])) arch = string(bytes.TrimSpace(ln[len(archKey):]))
} else if bytes.HasPrefix(l, versionKey) { } else if bytes.HasPrefix(ln, versionKey) {
version = string(bytes.TrimSpace(l[len(versionKey):])) version = string(bytes.TrimSpace(ln[len(versionKey):]))
} }
if arch != "" && version != "" { if arch != "" && version != "" {
return arch, version, nil return arch, version, nil
@ -323,14 +323,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass {
"derp_path": derpPath, "derp_path": derpPath,
"tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil),
}, },
Metrics: func(l prometheus.Labels) []prometheus.Metric { Metrics: func(lb prometheus.Labels) []prometheus.Metric {
metrics := []prometheus.Metric{ metrics := []prometheus.Metric{
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, lb), prometheus.GaugeValue, float64(size)),
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, lb), prometheus.CounterValue, transferTimeSeconds.Value()),
} }
if d.bwTUNIPv4Prefix != nil { if d.bwTUNIPv4Prefix != nil {
// For TCP-in-TCP probes, also record cumulative bytes transferred. // For TCP-in-TCP probes, also record cumulative bytes transferred.
metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value())) metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, lb), prometheus.CounterValue, totalBytesTransferred.Value()))
} }
return metrics return metrics
}, },
@ -361,11 +361,11 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa
}, },
Class: "derp_qd", Class: "derp_qd",
Labels: Labels{"derp_path": derpPath}, Labels: Labels{"derp_path": derpPath},
Metrics: func(l prometheus.Labels) []prometheus.Metric { Metrics: func(lb prometheus.Labels) []prometheus.Metric {
qdh.mx.Lock() qdh.mx.Lock()
result := []prometheus.Metric{ result := []prometheus.Metric{
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, lb), prometheus.CounterValue, float64(packetsDropped.Value())),
prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, lb), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)),
} }
qdh.mx.Unlock() qdh.mx.Unlock()
return result return result
@ -1046,11 +1046,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
}() }()
// Start a listener to receive the data // Start a listener to receive the data
l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) ln, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0"))
if err != nil { if err != nil {
return fmt.Errorf("failed to listen: %s", err) return fmt.Errorf("failed to listen: %s", err)
} }
defer l.Close() defer ln.Close()
// 128KB by default // 128KB by default
const writeChunkSize = 128 << 10 const writeChunkSize = 128 << 10
@ -1062,9 +1062,9 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
} }
// Dial ourselves // Dial ourselves
_, port, err := net.SplitHostPort(l.Addr().String()) _, port, err := net.SplitHostPort(ln.Addr().String())
if err != nil { if err != nil {
return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err) return fmt.Errorf("failed to split address %q: %w", ln.Addr().String(), err)
} }
connAddr := net.JoinHostPort(destinationAddr.String(), port) connAddr := net.JoinHostPort(destinationAddr.String(), port)
@ -1085,7 +1085,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
go func() { go func() {
defer wg.Done() defer wg.Done()
readConn, err := l.Accept() readConn, err := ln.Accept()
if err != nil { if err != nil {
readFinishedC <- err readFinishedC <- err
return return
@ -1146,11 +1146,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) { func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) {
// To avoid spamming the log with regular connection messages. // To avoid spamming the log with regular connection messages.
l := logger.Filtered(log.Printf, func(s string) bool { logf := logger.Filtered(log.Printf, func(s string) bool {
return !strings.Contains(s, "derphttp.Client.Connect: connecting to") return !strings.Contains(s, "derphttp.Client.Connect: connecting to")
}) })
priv := key.NewNode() priv := key.NewNode()
dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion { dc := derphttp.NewRegionClient(priv, logf, netmon.NewStatic(), func() *tailcfg.DERPRegion {
rid := n.RegionID rid := n.RegionID
return &tailcfg.DERPRegion{ return &tailcfg.DERPRegion{
RegionID: rid, RegionID: rid,
@ -118,25 +118,25 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob
panic(fmt.Sprintf("probe named %q already registered", name)) panic(fmt.Sprintf("probe named %q already registered", name))
} }
l := prometheus.Labels{ lb := prometheus.Labels{
"name": name, "name": name,
"class": pc.Class, "class": pc.Class,
} }
for k, v := range pc.Labels { for k, v := range pc.Labels {
l[k] = v lb[k] = v
} }
for k, v := range labels { for k, v := range labels {
l[k] = v lb[k] = v
} }
probe := newProbe(p, name, interval, l, pc) probe := newProbe(p, name, interval, lb, pc)
p.probes[name] = probe p.probes[name] = probe
go probe.loop() go probe.loop()
return probe return probe
} }
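Run assembles the final label set from three sources, with later writes winning on collisions: identity labels, then the probe class's labels, then the per-probe labels. A hedged sketch of the same merge as a standalone helper (hypothetical name; assumes the prometheus client import used above):

// mergeLabels mirrors the precedence in Run: name/class first, then
// class labels, then per-probe labels, later maps overwriting earlier ones.
func mergeLabels(name, class string, classLabels, probeLabels map[string]string) prometheus.Labels {
	lb := prometheus.Labels{"name": name, "class": class}
	for k, v := range classLabels {
		lb[k] = v
	}
	for k, v := range probeLabels {
		lb[k] = v
	}
	return lb
}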
// newProbe creates a new Probe with the given parameters, but does not start it. // newProbe creates a new Probe with the given parameters, but does not start it.
func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe { func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labels, pc ProbeClass) *Probe {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
probe := &Probe{ probe := &Probe{
prober: p, prober: p,
@ -155,17 +155,17 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label
latencyHist: ring.New(recentHistSize), latencyHist: ring.New(recentHistSize),
metrics: prometheus.NewRegistry(), metrics: prometheus.NewRegistry(),
metricLabels: l, metricLabels: lg,
mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l), mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, lg),
mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, l), mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, lg),
mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, l), mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg),
mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, l), mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg),
mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, l), mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg),
mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: l, Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg,
}, []string{"status"}), }, []string{"status"}),
mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{ mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l, Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: lg,
}, []string{"status"}), }, []string{"status"}),
} }
if p.metrics != nil { if p.metrics != nil {
@ -512,8 +512,8 @@ func (probe *Probe) probeInfoLocked() ProbeInfo {
inf.Latency = probe.latency inf.Latency = probe.latency
} }
probe.latencyHist.Do(func(v any) { probe.latencyHist.Do(func(v any) {
if l, ok := v.(time.Duration); ok { if latency, ok := v.(time.Duration); ok {
inf.RecentLatencies = append(inf.RecentLatencies, l) inf.RecentLatencies = append(inf.RecentLatencies, latency)
} }
}) })
probe.successHist.Do(func(v any) { probe.successHist.Do(func(v any) {
@ -719,8 +719,8 @@ func initialDelay(seed string, interval time.Duration) time.Duration {
// Labels is a set of metric labels used by a prober. // Labels is a set of metric labels used by a prober.
type Labels map[string]string type Labels map[string]string
func (l Labels) With(k, v string) Labels { func (lb Labels) With(k, v string) Labels {
new := maps.Clone(l) new := maps.Clone(lb)
new[k] = v new[k] = v
return new return new
} }
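Because With clones via maps.Clone before writing, a derived label set never aliases the base map. Illustrative values only:

base := Labels{"derp_path": "single"}
a := base.With("to", "nyc") // base is unchanged
b := base.With("to", "sfo") // each derived set is an independent copy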
@ -31,8 +31,8 @@ func (h AUMHash) String() string {
// UnmarshalText implements encoding.TextUnmarshaler. // UnmarshalText implements encoding.TextUnmarshaler.
func (h *AUMHash) UnmarshalText(text []byte) error { func (h *AUMHash) UnmarshalText(text []byte) error {
if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) { if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) {
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text)) return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text))
} }
if _, err := base32StdNoPad.Decode(h[:], text); err != nil { if _, err := base32StdNoPad.Decode(h[:], text); err != nil {
return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err) return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err)
@ -76,8 +76,8 @@ func TestSigNested(t *testing.T) {
if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil { if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil {
t.Fatalf("verifySignature(oldNode) failed: %v", err) t.Fatalf("verifySignature(oldNode) failed: %v", err)
} }
if l := sigChainLength(nestedSig); l != 1 { if ln := sigChainLength(nestedSig); ln != 1 {
t.Errorf("nestedSig chain length = %v, want 1", l) t.Errorf("nestedSig chain length = %v, want 1", ln)
} }
// The signature authorizing the rotation, signed by the // The signature authorizing the rotation, signed by the
@ -93,8 +93,8 @@ func TestSigNested(t *testing.T) {
if err := sig.verifySignature(node.Public(), k); err != nil { if err := sig.verifySignature(node.Public(), k); err != nil {
t.Fatalf("verifySignature(node) failed: %v", err) t.Fatalf("verifySignature(node) failed: %v", err)
} }
if l := sigChainLength(sig); l != 2 { if ln := sigChainLength(sig); ln != 2 {
t.Errorf("sig chain length = %v, want 2", l) t.Errorf("sig chain length = %v, want 2", ln)
} }
// Test verification fails if the wrong verification key is provided // Test verification fails if the wrong verification key is provided
@ -92,8 +92,8 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) {
} }
slices.Sort(lines) slices.Sort(lines)
for _, l := range lines { for _, ln := range lines {
_, err = w.Write([]byte(fmt.Sprintf("%s\n", l))) _, err = w.Write([]byte(fmt.Sprintf("%s\n", ln)))
if err != nil { if err != nil {
log.Printf("monitor: error writing status: %v", err) log.Printf("monitor: error writing status: %v", err)
return return
@ -75,10 +75,10 @@ func fromCommand(bs []byte) (string, error) {
return args, nil return args, nil
} }
func (f *fsm) Apply(l *raft.Log) any { func (f *fsm) Apply(lg *raft.Log) any {
f.mu.Lock() f.mu.Lock()
defer f.mu.Unlock() defer f.mu.Unlock()
s, err := fromCommand(l.Data) s, err := fromCommand(lg.Data)
if err != nil { if err != nil {
return CommandResult{ return CommandResult{
Err: err, Err: err,
@ -1021,11 +1021,11 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string {
} }
var b strings.Builder var b strings.Builder
b.WriteString("{") b.WriteString("{")
for i, l := range labels { for i, lb := range labels {
if i > 0 { if i > 0 {
b.WriteString(",") b.WriteString(",")
} }
b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue())) b.WriteString(fmt.Sprintf("%s=%q", lb.GetName(), lb.GetValue()))
} }
b.WriteString("}") b.WriteString("}")
return b.String() return b.String()
@ -1033,8 +1033,8 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string {
// sendData sends a given amount of bytes from s1 to s2. // sendData sends a given amount of bytes from s1 to s2.
func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error { func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error {
l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) lb := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip)))
defer l.Close() defer lb.Close()
// Dial to s1 from s2 // Dial to s1 from s2
w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip))
@ -1049,7 +1049,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC
defer close(allReceived) defer close(allReceived)
go func() { go func() {
conn, err := l.Accept() conn, err := lb.Accept()
if err != nil { if err != nil {
allReceived <- err allReceived <- err
return return
@ -184,14 +184,14 @@ type ipMapping struct {
// it is difficult to be 100% sure. This function should be used with care. It // it is difficult to be 100% sure. This function should be used with care. It
// will probably do what you want, but it is very easy to hold this wrong. // will probably do what you want, but it is very easy to hold this wrong.
func getProbablyFreePortNumber() (int, error) { func getProbablyFreePortNumber() (int, error) {
l, err := net.Listen("tcp", ":0") ln, err := net.Listen("tcp", ":0")
if err != nil { if err != nil {
return 0, err return 0, err
} }
defer l.Close() defer ln.Close()
_, port, err := net.SplitHostPort(l.Addr().String()) _, port, err := net.SplitHostPort(ln.Addr().String())
if err != nil { if err != nil {
return 0, err return 0, err
} }

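The "probably" in the comment above is a time-of-check/time-of-use race: the probe listener releases the port before the caller rebinds it. A self-contained sketch of the same pattern (the helper name and the rebind step are illustrative, not part of this change):

package main

import (
	"fmt"
	"net"
)

// probablyFreePort mirrors getProbablyFreePortNumber: bind to ":0" so the
// kernel picks an unused port, read the port back, then release it.
func probablyFreePort() (int, error) {
	ln, err := net.Listen("tcp", ":0")
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := probablyFreePort()
	if err != nil {
		panic(err)
	}
	// Rebind as quickly as possible; another process can still win the race
	// between the Close above and this Listen.
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}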
@ -628,8 +628,8 @@ type loggingResponseWriter struct {
// from r, or falls back to logf. If a nil logger is given, the logs are // from r, or falls back to logf. If a nil logger is given, the logs are
// discarded. // discarded.
func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter { func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter {
if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil { if lg, ok := logger.LogfKey.ValueOk(r.Context()); ok && lg != nil {
logf = l logf = lg
} }
if logf == nil { if logf == nil {
logf = logger.Discard logf = logger.Discard
@ -642,46 +642,46 @@ func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Reque
} }
// WriteHeader implements [http.ResponseWriter]. // WriteHeader implements [http.ResponseWriter].
func (l *loggingResponseWriter) WriteHeader(statusCode int) { func (lg *loggingResponseWriter) WriteHeader(statusCode int) {
if l.code != 0 { if lg.code != 0 {
l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode) lg.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", lg.code, statusCode)
return return
} }
if l.ctx.Err() == nil { if lg.ctx.Err() == nil {
l.code = statusCode lg.code = statusCode
} }
l.ResponseWriter.WriteHeader(statusCode) lg.ResponseWriter.WriteHeader(statusCode)
} }
// Write implements [http.ResponseWriter]. // Write implements [http.ResponseWriter].
func (l *loggingResponseWriter) Write(bs []byte) (int, error) { func (lg *loggingResponseWriter) Write(bs []byte) (int, error) {
if l.code == 0 { if lg.code == 0 {
l.code = 200 lg.code = 200
} }
n, err := l.ResponseWriter.Write(bs) n, err := lg.ResponseWriter.Write(bs)
l.bytes += n lg.bytes += n
return n, err return n, err
} }
// Hijack implements http.Hijacker. Note that hijacking can still fail // Hijack implements http.Hijacker. Note that hijacking can still fail
// because the wrapped ResponseWriter is not required to implement // because the wrapped ResponseWriter is not required to implement
// Hijacker, as this breaks HTTP/2. // Hijacker, as this breaks HTTP/2.
func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { func (lg *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := l.ResponseWriter.(http.Hijacker) h, ok := lg.ResponseWriter.(http.Hijacker)
if !ok { if !ok {
return nil, nil, errors.New("ResponseWriter is not a Hijacker") return nil, nil, errors.New("ResponseWriter is not a Hijacker")
} }
conn, buf, err := h.Hijack() conn, buf, err := h.Hijack()
if err == nil { if err == nil {
l.hijacked = true lg.hijacked = true
} }
return conn, buf, err return conn, buf, err
} }
func (l loggingResponseWriter) Flush() { func (lg loggingResponseWriter) Flush() {
f, _ := l.ResponseWriter.(http.Flusher) f, _ := lg.ResponseWriter.(http.Flusher)
if f == nil { if f == nil {
l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") lg.logf("[unexpected] tried to Flush a ResponseWriter that can't flush")
return return
} }
f.Flush() f.Flush()

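For reference, the wrapper pattern above in miniature: embed the real http.ResponseWriter, intercept WriteHeader and Write, and treat a first Write without an explicit status as an implicit 200, mirroring net/http. This is a standalone sketch with hypothetical names; as the Hijack comment notes, wrapping also hides optional interfaces such as http.Flusher unless they are re-implemented:

package main

import (
	"log"
	"net/http"
)

type statusRecorder struct {
	http.ResponseWriter
	code  int
	bytes int
}

func (r *statusRecorder) WriteHeader(code int) {
	if r.code == 0 {
		r.code = code
	}
	r.ResponseWriter.WriteHeader(code)
}

func (r *statusRecorder) Write(p []byte) (int, error) {
	if r.code == 0 {
		r.code = 200 // implicit OK on first Write, as net/http does
	}
	n, err := r.ResponseWriter.Write(p)
	r.bytes += n
	return n, err
}

func logRequests(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		rec := &statusRecorder{ResponseWriter: w}
		next.ServeHTTP(rec, req)
		log.Printf("%s %s -> %d (%d bytes)", req.Method, req.URL.Path, rec.code, rec.bytes)
	})
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("hello\n"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", logRequests(h)))
}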
@ -32,20 +32,20 @@ func TestPointAnonymize(t *testing.T) {
last := geo.MakePoint(llat, 0) last := geo.MakePoint(llat, 0)
cur := geo.MakePoint(lat, 0) cur := geo.MakePoint(lat, 0)
anon := cur.Quantize() anon := cur.Quantize()
switch l, g, err := anon.LatLng(); { switch latlng, g, err := anon.LatLng(); {
case err != nil: case err != nil:
t.Fatal(err) t.Fatal(err)
case lat == southPole: case lat == southPole:
// initialize llng, to the first snapped longitude // initialize llng, to the first snapped longitude
llat = l llat = latlng
goto Lng goto Lng
case g != 0: case g != 0:
t.Fatalf("%v is west or east of %v", anon, last) t.Fatalf("%v is west or east of %v", anon, last)
case l < llat: case latlng < llat:
t.Fatalf("%v is south of %v", anon, last) t.Fatalf("%v is south of %v", anon, last)
case l == llat: case latlng == llat:
continue continue
case l > llat: case latlng > llat:
switch dist, err := last.DistanceTo(anon); { switch dist, err := last.DistanceTo(anon); {
case err != nil: case err != nil:
t.Fatal(err) t.Fatal(err)
@ -55,7 +55,7 @@ func TestPointAnonymize(t *testing.T) {
t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon)
t.Fatalf("%v is too close to %v", anon, last) t.Fatalf("%v is too close to %v", anon, last)
default: default:
llat = l llat = latlng
} }
} }
@ -65,14 +65,14 @@ func TestPointAnonymize(t *testing.T) {
last := geo.MakePoint(llat, llng) last := geo.MakePoint(llat, llng)
cur := geo.MakePoint(lat, lng) cur := geo.MakePoint(lat, lng)
anon := cur.Quantize() anon := cur.Quantize()
switch l, g, err := anon.LatLng(); { switch latlng, g, err := anon.LatLng(); {
case err != nil: case err != nil:
t.Fatal(err) t.Fatal(err)
case lng == dateLine: case lng == dateLine:
// initialize llng, to the first snapped longitude // initialize llng, to the first snapped longitude
llng = g llng = g
continue continue
case l != llat: case latlng != llat:
t.Fatalf("%v is north or south of %v", anon, last) t.Fatalf("%v is north or south of %v", anon, last)
case g != llng: case g != llng:
const tolerance = geo.MinSeparation * 0x1p-9 const tolerance = geo.MinSeparation * 0x1p-9

@ -167,11 +167,11 @@ func (k DiscoPublic) String() string {
} }
// Compare returns an integer comparing DiscoPublic k and l lexicographically. // Compare returns an integer comparing DiscoPublic k and l lexicographically.
// The result will be 0 if k == l, -1 if k < l, and +1 if k > l. This is useful // The result will be 0 if k == other, -1 if k < other, and +1 if k > other.
// for situations requiring only one node in a pair to perform some operation, // This is useful for situations requiring only one node in a pair to perform
// e.g. probing UDP path lifetime. // some operation, e.g. probing UDP path lifetime.
func (k DiscoPublic) Compare(l DiscoPublic) int { func (k DiscoPublic) Compare(other DiscoPublic) int {
return bytes.Compare(k.k[:], l.k[:]) return bytes.Compare(k.k[:], other.k[:])
} }
// AppendText implements encoding.TextAppender. // AppendText implements encoding.TextAppender.

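A sketch of the pairwise tie-break the Compare comment describes: both ends of a path evaluate the same deterministic comparison, so exactly one of them ends up probing. This assumes key.NewDisco from tailscale.com/types/key; the print statements stand in for real probe logic:

package main

import (
	"fmt"

	"tailscale.com/types/key"
)

func main() {
	a := key.NewDisco().Public()
	b := key.NewDisco().Public()
	// The lexicographically lower key initiates; the other side waits.
	if a.Compare(b) < 0 {
		fmt.Println("a initiates the UDP path-lifetime probe")
	} else {
		fmt.Println("b initiates the UDP path-lifetime probe")
	}
}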
@ -45,36 +45,36 @@ func ListWithOpts[T ImmutableType](opts ...Options) List[T] {
// SetValue configures the preference with the specified value. // SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference, // It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference. // and [ErrReadOnly] if p is a read-only preference.
func (l *List[T]) SetValue(val []T) error { func (ls *List[T]) SetValue(val []T) error {
return l.preference.SetValue(cloneSlice(val)) return ls.preference.SetValue(cloneSlice(val))
} }
// SetManagedValue configures the preference with the specified value // SetManagedValue configures the preference with the specified value
// and marks the preference as managed. // and marks the preference as managed.
func (l *List[T]) SetManagedValue(val []T) { func (ls *List[T]) SetManagedValue(val []T) {
l.preference.SetManagedValue(cloneSlice(val)) ls.preference.SetManagedValue(cloneSlice(val))
} }
// View returns a read-only view of l. // View returns a read-only view of ls.
func (l *List[T]) View() ListView[T] { func (ls *List[T]) View() ListView[T] {
return ListView[T]{l} return ListView[T]{ls}
} }
// Clone returns a copy of l that aliases no memory with l. // Clone returns a copy of ls that aliases no memory with ls.
func (l List[T]) Clone() *List[T] { func (ls List[T]) Clone() *List[T] {
res := ptr.To(l) res := ptr.To(ls)
if v, ok := l.s.Value.GetOk(); ok { if v, ok := ls.s.Value.GetOk(); ok {
res.s.Value.Set(append(v[:0:0], v...)) res.s.Value.Set(append(v[:0:0], v...))
} }
return res return res
} }
// Equal reports whether l and l2 are equal. // Equal reports whether ls and l2 are equal.
func (l List[T]) Equal(l2 List[T]) bool { func (ls List[T]) Equal(l2 List[T]) bool {
if l.s.Metadata != l2.s.Metadata { if ls.s.Metadata != l2.s.Metadata {
return false return false
} }
v1, ok1 := l.s.Value.GetOk() v1, ok1 := ls.s.Value.GetOk()
v2, ok2 := l2.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk()
if ok1 != ok2 { if ok1 != ok2 {
return false return false

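A usage sketch of the semantics above, written as if inside the same package (ListOf and ReadOnly appear in the tests below; the error values are taken from the doc comments):

ls := ListOf([]int{1, 2, 3}, ReadOnly)
err := ls.SetValue([]int{4, 5}) // ErrReadOnly: the preference is read-only

mgd := ListWithOpts[int]()
mgd.SetManagedValue([]int{9}) // set by policy; marks the preference managed
err = mgd.SetValue([]int{10}) // ErrManaged: users cannot override it
_ = err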
@ -487,31 +487,31 @@ func TestItemView(t *testing.T) {
} }
func TestListView(t *testing.T) { func TestListView(t *testing.T) {
l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) ls := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly)
lv := l.View() lv := ls.View()
checkIsSet(t, lv, true) checkIsSet(t, lv, true)
checkIsManaged(t, lv, false) checkIsManaged(t, lv, false)
checkIsReadOnly(t, lv, true) checkIsReadOnly(t, lv, true)
checkValue(t, lv, views.SliceOf(l.Value())) checkValue(t, lv, views.SliceOf(ls.Value()))
checkValueOk(t, lv, views.SliceOf(l.Value()), true) checkValueOk(t, lv, views.SliceOf(ls.Value()), true)
l2 := *lv.AsStruct() l2 := *lv.AsStruct()
checkEqual(t, l, l2, true) checkEqual(t, ls, l2, true)
} }
func TestStructListView(t *testing.T) { func TestStructListView(t *testing.T) {
l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) ls := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly)
lv := StructListViewOf(&l) lv := StructListViewOf(&ls)
checkIsSet(t, lv, true) checkIsSet(t, lv, true)
checkIsManaged(t, lv, false) checkIsManaged(t, lv, false)
checkIsReadOnly(t, lv, true) checkIsReadOnly(t, lv, true)
checkValue(t, lv, views.SliceOfViews(l.Value())) checkValue(t, lv, views.SliceOfViews(ls.Value()))
checkValueOk(t, lv, views.SliceOfViews(l.Value()), true) checkValueOk(t, lv, views.SliceOfViews(ls.Value()), true)
l2 := *lv.AsStruct() l2 := *lv.AsStruct()
checkEqual(t, l, l2, true) checkEqual(t, ls, l2, true)
} }
func TestStructMapView(t *testing.T) { func TestStructMapView(t *testing.T) {

@ -33,20 +33,20 @@ func StructListWithOpts[T views.Cloner[T]](opts ...Options) StructList[T] {
// SetValue configures the preference with the specified value. // SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference, // It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference. // and [ErrReadOnly] if p is a read-only preference.
func (l *StructList[T]) SetValue(val []T) error { func (ls *StructList[T]) SetValue(val []T) error {
return l.preference.SetValue(deepCloneSlice(val)) return ls.preference.SetValue(deepCloneSlice(val))
} }
// SetManagedValue configures the preference with the specified value // SetManagedValue configures the preference with the specified value
// and marks the preference as managed. // and marks the preference as managed.
func (l *StructList[T]) SetManagedValue(val []T) { func (ls *StructList[T]) SetManagedValue(val []T) {
l.preference.SetManagedValue(deepCloneSlice(val)) ls.preference.SetManagedValue(deepCloneSlice(val))
} }
// Clone returns a copy of l that aliases no memory with l. // Clone returns a copy of ls that aliases no memory with ls.
func (l StructList[T]) Clone() *StructList[T] { func (ls StructList[T]) Clone() *StructList[T] {
res := ptr.To(l) res := ptr.To(ls)
if v, ok := l.s.Value.GetOk(); ok { if v, ok := ls.s.Value.GetOk(); ok {
res.s.Value.Set(deepCloneSlice(v)) res.s.Value.Set(deepCloneSlice(v))
} }
return res return res
@ -56,11 +56,11 @@ func (l StructList[T]) Clone() *StructList[T] {
// If the template type T implements an Equal(T) bool method, it will be used // If the template type T implements an Equal(T) bool method, it will be used
// instead of the == operator for value comparison. // instead of the == operator for value comparison.
// It panics if T is not comparable. // It panics if T is not comparable.
func (l StructList[T]) Equal(l2 StructList[T]) bool { func (ls StructList[T]) Equal(l2 StructList[T]) bool {
if l.s.Metadata != l2.s.Metadata { if ls.s.Metadata != l2.s.Metadata {
return false return false
} }
v1, ok1 := l.s.Value.GetOk() v1, ok1 := ls.s.Value.GetOk()
v2, ok2 := l2.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk()
if ok1 != ok2 { if ok1 != ok2 {
return false return false
@ -105,8 +105,8 @@ type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct {
// StructListViewOf returns a read-only view of l. // StructListViewOf returns a read-only view of ls.
// It is used by [tailscale.com/cmd/viewer]. // It is used by [tailscale.com/cmd/viewer].
func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] { func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](ls *StructList[T]) StructListView[T, V] {
return StructListView[T, V]{l} return StructListView[T, V]{ls}
} }
// Valid reports whether the underlying [StructList] is non-nil. // Valid reports whether the underlying [StructList] is non-nil.

@ -31,14 +31,14 @@ func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructM
// SetValue configures the preference with the specified value. // SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference, // It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference. // and [ErrReadOnly] if p is a read-only preference.
func (l *StructMap[K, V]) SetValue(val map[K]V) error { func (m *StructMap[K, V]) SetValue(val map[K]V) error {
return l.preference.SetValue(deepCloneMap(val)) return m.preference.SetValue(deepCloneMap(val))
} }
// SetManagedValue configures the preference with the specified value // SetManagedValue configures the preference with the specified value
// and marks the preference as managed. // and marks the preference as managed.
func (l *StructMap[K, V]) SetManagedValue(val map[K]V) { func (m *StructMap[K, V]) SetManagedValue(val map[K]V) {
l.preference.SetManagedValue(deepCloneMap(val)) m.preference.SetManagedValue(deepCloneMap(val))
} }
// Clone returns a copy of m that aliases no memory with m. // Clone returns a copy of m that aliases no memory with m.

@ -94,59 +94,59 @@ type bucket struct {
// Allow charges the key one token (up to the overdraft limit), and // Allow charges the key one token (up to the overdraft limit), and
// reports whether the key can perform an action. // reports whether the key can perform an action.
func (l *Limiter[K]) Allow(key K) bool { func (lm *Limiter[K]) Allow(key K) bool {
return l.allow(key, time.Now()) return lm.allow(key, time.Now())
} }
func (l *Limiter[K]) allow(key K, now time.Time) bool { func (lm *Limiter[K]) allow(key K, now time.Time) bool {
l.mu.Lock() lm.mu.Lock()
defer l.mu.Unlock() defer lm.mu.Unlock()
return l.allowBucketLocked(l.getBucketLocked(key, now), now) return lm.allowBucketLocked(lm.getBucketLocked(key, now), now)
} }
func (l *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { func (lm *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket {
if l.cache == nil { if lm.cache == nil {
l.cache = &lru.Cache[K, *bucket]{MaxEntries: l.Size} lm.cache = &lru.Cache[K, *bucket]{MaxEntries: lm.Size}
} else if b := l.cache.Get(key); b != nil { } else if b := lm.cache.Get(key); b != nil {
return b return b
} }
b := &bucket{ b := &bucket{
cur: l.Max, cur: lm.Max,
lastUpdate: now.Truncate(l.RefillInterval), lastUpdate: now.Truncate(lm.RefillInterval),
} }
l.cache.Set(key, b) lm.cache.Set(key, b)
return b return b
} }
func (l *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { func (lm *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool {
// Only update the bucket quota if needed to process request. // Only update the bucket quota if needed to process request.
if b.cur <= 0 { if b.cur <= 0 {
l.updateBucketLocked(b, now) lm.updateBucketLocked(b, now)
} }
ret := b.cur > 0 ret := b.cur > 0
if b.cur > -l.Overdraft { if b.cur > -lm.Overdraft {
b.cur-- b.cur--
} }
return ret return ret
} }
func (l *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { func (lm *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) {
now = now.Truncate(l.RefillInterval) now = now.Truncate(lm.RefillInterval)
if now.Before(b.lastUpdate) { if now.Before(b.lastUpdate) {
return return
} }
timeDelta := max(now.Sub(b.lastUpdate), 0) timeDelta := max(now.Sub(b.lastUpdate), 0)
tokenDelta := int64(timeDelta / l.RefillInterval) tokenDelta := int64(timeDelta / lm.RefillInterval)
b.cur = min(b.cur+tokenDelta, l.Max) b.cur = min(b.cur+tokenDelta, lm.Max)
b.lastUpdate = now b.lastUpdate = now
} }
// peekForTest returns the number of tokens for key, also reporting // tokensForTest returns the number of tokens for key, also reporting
// whether key was present. // whether key was present.
func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { func (lm *Limiter[K]) tokensForTest(key K) (int64, bool) {
l.mu.Lock() lm.mu.Lock()
defer l.mu.Unlock() defer lm.mu.Unlock()
if b, ok := l.cache.PeekOk(key); ok { if b, ok := lm.cache.PeekOk(key); ok {
return b.cur, true return b.cur, true
} }
return 0, false return 0, false
@ -159,12 +159,12 @@ func (l *Limiter[K]) tokensForTest(key K) (int64, bool) {
// DumpHTML blocks other callers of the limiter while it collects the // DumpHTML blocks other callers of the limiter while it collects the
// state for dumping. It should not be called on large limiters // state for dumping. It should not be called on large limiters
// involved in hot codepaths. // involved in hot codepaths.
func (l *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { func (lm *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) {
l.dumpHTML(w, onlyLimited, time.Now()) lm.dumpHTML(w, onlyLimited, time.Now())
} }
func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { func (lm *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) {
dump := l.collectDump(now) dump := lm.collectDump(now)
io.WriteString(w, "<table><tr><th>Key</th><th>Tokens</th></tr>") io.WriteString(w, "<table><tr><th>Key</th><th>Tokens</th></tr>")
for _, line := range dump { for _, line := range dump {
if onlyLimited && line.Tokens > 0 { if onlyLimited && line.Tokens > 0 {
@ -183,13 +183,13 @@ func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) {
} }
// collectDump grabs a copy of the limiter state needed by DumpHTML. // collectDump grabs a copy of the limiter state needed by DumpHTML.
func (l *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] {
l.mu.Lock() lm.mu.Lock()
defer l.mu.Unlock() defer lm.mu.Unlock()
ret := make([]dumpEntry[K], 0, l.cache.Len()) ret := make([]dumpEntry[K], 0, lm.cache.Len())
l.cache.ForEach(func(k K, v *bucket) { lm.cache.ForEach(func(k K, v *bucket) {
l.updateBucketLocked(v, now) // so stats are accurate lm.updateBucketLocked(v, now) // so stats are accurate
ret = append(ret, dumpEntry[K]{k, v.cur}) ret = append(ret, dumpEntry[K]{k, v.cur})
}) })
return ret return ret

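Taken together this is a per-key token bucket with LRU eviction: each key gets Max tokens, one token refills per RefillInterval, and at most Size keys are tracked. A usage sketch, as if in the same package (the key string is illustrative):

lim := &Limiter[string]{
	Size:           1000,        // distinct keys tracked before LRU eviction
	Max:            10,          // bucket capacity, i.e. burst size
	RefillInterval: time.Second, // one token back per second
}
for range 12 {
	fmt.Println(lim.Allow("203.0.113.7")) // ten trues, then false once drained
}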
@ -16,7 +16,7 @@ const testRefillInterval = time.Second
func TestLimiter(t *testing.T) { func TestLimiter(t *testing.T) {
// 1qps, burst of 10, 2 keys tracked // 1qps, burst of 10, 2 keys tracked
l := &Limiter[string]{ limiter := &Limiter[string]{
Size: 2, Size: 2,
Max: 10, Max: 10,
RefillInterval: testRefillInterval, RefillInterval: testRefillInterval,
@ -24,48 +24,48 @@ func TestLimiter(t *testing.T) {
// Consume entire burst // Consume entire burst
now := time.Now().Truncate(testRefillInterval) now := time.Now().Truncate(testRefillInterval)
allowed(t, l, "foo", 10, now) allowed(t, limiter, "foo", 10, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", 0) hasTokens(t, limiter, "foo", 0)
allowed(t, l, "bar", 10, now) allowed(t, limiter, "bar", 10, now)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", 0) hasTokens(t, limiter, "bar", 0)
// Refill 1 token for both foo and bar // Refill 1 token for both foo and bar
now = now.Add(time.Second + time.Millisecond) now = now.Add(time.Second + time.Millisecond)
allowed(t, l, "foo", 1, now) allowed(t, limiter, "foo", 1, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", 0) hasTokens(t, limiter, "foo", 0)
allowed(t, l, "bar", 1, now) allowed(t, limiter, "bar", 1, now)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", 0) hasTokens(t, limiter, "bar", 0)
// Refill 2 tokens for foo and bar // Refill 2 tokens for foo and bar
now = now.Add(2*time.Second + time.Millisecond) now = now.Add(2*time.Second + time.Millisecond)
allowed(t, l, "foo", 2, now) allowed(t, limiter, "foo", 2, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", 0) hasTokens(t, limiter, "foo", 0)
allowed(t, l, "bar", 2, now) allowed(t, limiter, "bar", 2, now)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", 0) hasTokens(t, limiter, "bar", 0)
// qux can burst 10, evicts foo so it can immediately burst 10 again too // qux can burst 10, evicts foo so it can immediately burst 10 again too
allowed(t, l, "qux", 10, now) allowed(t, limiter, "qux", 10, now)
denied(t, l, "qux", 1, now) denied(t, limiter, "qux", 1, now)
notInLimiter(t, l, "foo") notInLimiter(t, limiter, "foo")
denied(t, l, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled denied(t, limiter, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled
allowed(t, l, "foo", 10, now) allowed(t, limiter, "foo", 10, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", 0) hasTokens(t, limiter, "foo", 0)
} }
func TestLimiterOverdraft(t *testing.T) { func TestLimiterOverdraft(t *testing.T) {
// 1qps, burst of 10, overdraft of 2, 2 keys tracked // 1qps, burst of 10, overdraft of 2, 2 keys tracked
l := &Limiter[string]{ limiter := &Limiter[string]{
Size: 2, Size: 2,
Max: 10, Max: 10,
Overdraft: 2, Overdraft: 2,
@ -74,51 +74,51 @@ func TestLimiterOverdraft(t *testing.T) {
// Consume entire burst, go 1 into debt // Consume entire burst, go 1 into debt
now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond)
allowed(t, l, "foo", 10, now) allowed(t, limiter, "foo", 10, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", -1) hasTokens(t, limiter, "foo", -1)
allowed(t, l, "bar", 10, now) allowed(t, limiter, "bar", 10, now)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", -1) hasTokens(t, limiter, "bar", -1)
// Refill 1 token for both foo and bar. // Refill 1 token for both foo and bar.
// Still denied, still in debt. // Still denied, still in debt.
now = now.Add(time.Second) now = now.Add(time.Second)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", -1) hasTokens(t, limiter, "foo", -1)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", -1) hasTokens(t, limiter, "bar", -1)
// Refill 2 tokens for foo and bar (1 available after debt), try // Refill 2 tokens for foo and bar (1 available after debt), try
// to consume 4. Overdraft is capped to 2. // to consume 4. Overdraft is capped to 2.
now = now.Add(2 * time.Second) now = now.Add(2 * time.Second)
allowed(t, l, "foo", 1, now) allowed(t, limiter, "foo", 1, now)
denied(t, l, "foo", 3, now) denied(t, limiter, "foo", 3, now)
hasTokens(t, l, "foo", -2) hasTokens(t, limiter, "foo", -2)
allowed(t, l, "bar", 1, now) allowed(t, limiter, "bar", 1, now)
denied(t, l, "bar", 3, now) denied(t, limiter, "bar", 3, now)
hasTokens(t, l, "bar", -2) hasTokens(t, limiter, "bar", -2)
// Refill 1, not enough to allow. // Refill 1, not enough to allow.
now = now.Add(time.Second) now = now.Add(time.Second)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", -2) hasTokens(t, limiter, "foo", -2)
denied(t, l, "bar", 1, now) denied(t, limiter, "bar", 1, now)
hasTokens(t, l, "bar", -2) hasTokens(t, limiter, "bar", -2)
// qux evicts foo, foo can immediately burst 10 again. // qux evicts foo, foo can immediately burst 10 again.
allowed(t, l, "qux", 1, now) allowed(t, limiter, "qux", 1, now)
hasTokens(t, l, "qux", 9) hasTokens(t, limiter, "qux", 9)
notInLimiter(t, l, "foo") notInLimiter(t, limiter, "foo")
allowed(t, l, "foo", 10, now) allowed(t, limiter, "foo", 10, now)
denied(t, l, "foo", 1, now) denied(t, limiter, "foo", 1, now)
hasTokens(t, l, "foo", -1) hasTokens(t, limiter, "foo", -1)
} }
func TestDumpHTML(t *testing.T) { func TestDumpHTML(t *testing.T) {
l := &Limiter[string]{ limiter := &Limiter[string]{
Size: 3, Size: 3,
Max: 10, Max: 10,
Overdraft: 10, Overdraft: 10,
@ -126,13 +126,13 @@ func TestDumpHTML(t *testing.T) {
} }
now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond)
allowed(t, l, "foo", 10, now) allowed(t, limiter, "foo", 10, now)
denied(t, l, "foo", 2, now) denied(t, limiter, "foo", 2, now)
allowed(t, l, "bar", 4, now) allowed(t, limiter, "bar", 4, now)
allowed(t, l, "qux", 1, now) allowed(t, limiter, "qux", 1, now)
var out bytes.Buffer var out bytes.Buffer
l.DumpHTML(&out, false) limiter.DumpHTML(&out, false)
want := strings.Join([]string{ want := strings.Join([]string{
"<table>", "<table>",
"<tr><th>Key</th><th>Tokens</th></tr>", "<tr><th>Key</th><th>Tokens</th></tr>",
@ -146,7 +146,7 @@ func TestDumpHTML(t *testing.T) {
} }
out.Reset() out.Reset()
l.DumpHTML(&out, true) limiter.DumpHTML(&out, true)
want = strings.Join([]string{ want = strings.Join([]string{
"<table>", "<table>",
"<tr><th>Key</th><th>Tokens</th></tr>", "<tr><th>Key</th><th>Tokens</th></tr>",
@ -161,7 +161,7 @@ func TestDumpHTML(t *testing.T) {
// organically. // organically.
now = now.Add(3 * time.Second) now = now.Add(3 * time.Second)
out.Reset() out.Reset()
l.dumpHTML(&out, false, now) limiter.dumpHTML(&out, false, now)
want = strings.Join([]string{ want = strings.Join([]string{
"<table>", "<table>",
"<tr><th>Key</th><th>Tokens</th></tr>", "<tr><th>Key</th><th>Tokens</th></tr>",
@ -175,29 +175,29 @@ func TestDumpHTML(t *testing.T) {
} }
} }
func allowed(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) {
t.Helper() t.Helper()
for i := range count { for i := range count {
if !l.allow(key, now) { if !limiter.allow(key, now) {
toks, ok := l.tokensForTest(key) toks, ok := limiter.tokensForTest(key)
t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok) t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok)
} }
} }
} }
func denied(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { func denied(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) {
t.Helper() t.Helper()
for i := range count { for i := range count {
if l.allow(key, now) { if limiter.allow(key, now) {
toks, ok := l.tokensForTest(key) toks, ok := limiter.tokensForTest(key)
t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok) t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok)
} }
} }
} }
func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { func hasTokens(t *testing.T, limiter *Limiter[string], key string, want int64) {
t.Helper() t.Helper()
got, ok := l.tokensForTest(key) got, ok := limiter.tokensForTest(key)
if !ok { if !ok {
t.Errorf("key %q missing from limiter", key) t.Errorf("key %q missing from limiter", key)
} else if got != want { } else if got != want {
@ -205,9 +205,9 @@ func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) {
} }
} }
func notInLimiter(t *testing.T, l *Limiter[string], key string) { func notInLimiter(t *testing.T, limiter *Limiter[string], key string) {
t.Helper() t.Helper()
if tokens, ok := l.tokensForTest(key); ok { if tokens, ok := limiter.tokensForTest(key); ok {
t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens) t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens)
} }
} }

@ -85,7 +85,7 @@ type tableDetector interface {
type linuxFWDetector struct{} type linuxFWDetector struct{}
// iptDetect returns the number of iptables rules in the current namespace. // iptDetect returns the number of iptables rules in the current namespace.
func (l linuxFWDetector) iptDetect() (int, error) { func (ld linuxFWDetector) iptDetect() (int, error) {
return detectIptables() return detectIptables()
} }
@ -96,7 +96,7 @@ var hookDetectNetfilter feature.Hook[func() (int, error)]
var ErrUnsupported = errors.New("linuxfw:unsupported") var ErrUnsupported = errors.New("linuxfw:unsupported")
// nftDetect returns the number of nftables rules in the current namespace. // nftDetect returns the number of nftables rules in the current namespace.
func (l linuxFWDetector) nftDetect() (int, error) { func (ld linuxFWDetector) nftDetect() (int, error) {
if f, ok := hookDetectNetfilter.GetOk(); ok { if f, ok := hookDetectNetfilter.GetOk(); ok {
return f() return f()
} }

@ -84,8 +84,8 @@ func TestStressEvictions(t *testing.T) {
for range numProbes { for range numProbes {
v := vals[rand.Intn(len(vals))] v := vals[rand.Intn(len(vals))]
c.Set(v, true) c.Set(v, true)
if l := c.Len(); l > cacheSize { if ln := c.Len(); ln > cacheSize {
t.Fatalf("Cache size now %d, want max %d", l, cacheSize) t.Fatalf("Cache size now %d, want max %d", ln, cacheSize)
} }
} }
} }
@ -119,8 +119,8 @@ func TestStressBatchedEvictions(t *testing.T) {
c.DeleteOldest() c.DeleteOldest()
} }
} }
if l := c.Len(); l > cacheSizeMax { if ln := c.Len(); ln > cacheSizeMax {
t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax) t.Fatalf("Cache size now %d, want max %d", ln, cacheSizeMax)
} }
} }
} }

@ -322,33 +322,33 @@ func Definitions() ([]*Definition, error) {
type PlatformList []string type PlatformList []string
// Has reports whether l contains the target platform. // Has reports whether ls contains the target platform.
func (l PlatformList) Has(target string) bool { func (ls PlatformList) Has(target string) bool {
if len(l) == 0 { if len(ls) == 0 {
return true return true
} }
return slices.ContainsFunc(l, func(os string) bool { return slices.ContainsFunc(ls, func(os string) bool {
return strings.EqualFold(os, target) return strings.EqualFold(os, target)
}) })
} }
// HasCurrent is like Has, but for the current platform. // HasCurrent is like Has, but for the current platform.
func (l PlatformList) HasCurrent() bool { func (ls PlatformList) HasCurrent() bool {
return l.Has(internal.OS()) return ls.Has(internal.OS())
} }
// mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions, // mergeFrom merges l2 into ls. Since an empty list indicates no platform restrictions,
// if either l or l2 is empty, the merged result in l will also be empty. // if either ls or l2 is empty, the merged result in ls will also be empty.
func (l *PlatformList) mergeFrom(l2 PlatformList) { func (ls *PlatformList) mergeFrom(l2 PlatformList) {
switch { switch {
case len(*l) == 0: case len(*ls) == 0:
// No-op. An empty list indicates no platform restrictions. // No-op. An empty list indicates no platform restrictions.
case len(l2) == 0: case len(l2) == 0:
// Merging with an empty list results in an empty list. // Merging with an empty list results in an empty list.
*l = l2 *ls = l2
default: default:
// Append, sort and dedup. // Append, sort and dedup.
*l = append(*l, l2...) *ls = append(*ls, l2...)
slices.Sort(*l) slices.Sort(*ls)
*l = slices.Compact(*l) *ls = slices.Compact(*ls)
} }
} }

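A worked example of the empty-means-unrestricted rule, as if in the same package since mergeFrom is unexported (values are illustrative):

all := PlatformList{}                    // empty list: no platform restriction
some := PlatformList{"linux", "windows"}

fmt.Println(all.Has("darwin")) // true: an empty list matches every platform
fmt.Println(some.Has("LINUX")) // true: EqualFold makes matching case-insensitive
some.mergeFrom(all)            // merging in "no restriction" widens, never narrows
fmt.Println(len(some))         // 0: the merged list is unrestricted again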
@ -311,8 +311,8 @@ func TestListSettingDefinitions(t *testing.T) {
t.Fatalf("SetDefinitionsForTest failed: %v", err) t.Fatalf("SetDefinitionsForTest failed: %v", err)
} }
cmp := func(l, r *Definition) int { cmp := func(a, b *Definition) int {
return strings.Compare(string(l.Key()), string(r.Key())) return strings.Compare(string(a.Key()), string(b.Key()))
} }
want := append([]*Definition{}, definitions...) want := append([]*Definition{}, definitions...)
slices.SortFunc(want, cmp) slices.SortFunc(want, cmp)

@ -182,16 +182,16 @@ func doWithMachinePolicyLocked(t *testing.T, f func()) {
f() f()
} }
func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { func doWithCustomEnterLeaveFuncs(t *testing.T, f func(*PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) {
t.Helper() t.Helper()
l := NewMachinePolicyLock() lock := NewMachinePolicyLock()
l.enterFn, l.leaveFn = enter, leave lock.enterFn, lock.leaveFn = enter, leave
t.Cleanup(func() { t.Cleanup(func() {
if err := l.Close(); err != nil { if err := lock.Close(); err != nil {
t.Fatalf("(*PolicyLock).Close failed: %v", err) t.Fatalf("(*PolicyLock).Close failed: %v", err)
} }
}) })
f(l) f(lock)
} }

@ -127,32 +127,32 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) {
return lock, nil return lock, nil
} }
// Lock locks l. // Lock locks lk.
// It returns [ErrInvalidLockState] if l has a zero value or has already been closed, // It returns [ErrInvalidLockState] if lk has a zero value or has already been closed,
// [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place, // [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place,
// or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired. // or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired.
// //
// As a special case, it fails with [windows.ERROR_ACCESS_DENIED] // As a special case, it fails with [windows.ERROR_ACCESS_DENIED]
// if l is a user policy lock, and the corresponding user is not logged in // if lk is a user policy lock, and the corresponding user is not logged in
// interactively at the time of the call. // interactively at the time of the call.
func (l *PolicyLock) Lock() error { func (lk *PolicyLock) Lock() error {
if policyLockRestricted.Load() > 0 { if policyLockRestricted.Load() > 0 {
return ErrLockRestricted return ErrLockRestricted
} }
l.mu.Lock() lk.mu.Lock()
defer l.mu.Unlock() defer lk.mu.Unlock()
if l.lockCnt.Add(2)&1 == 0 { if lk.lockCnt.Add(2)&1 == 0 {
// The lock cannot be acquired because it has either never been properly // The lock cannot be acquired because it has either never been properly
// created or its Close method has already been called. However, we need // created or its Close method has already been called. However, we need
// to call Unlock to both decrement lockCnt and leave the underlying // to call Unlock to both decrement lockCnt and leave the underlying
// CriticalPolicySection if we won the race with another goroutine and // CriticalPolicySection if we won the race with another goroutine and
// now own the lock. // now own the lock.
l.Unlock() lk.Unlock()
return ErrInvalidLockState return ErrInvalidLockState
} }
if l.handle != 0 { if lk.handle != 0 {
// The underlying CriticalPolicySection is already acquired. // The underlying CriticalPolicySection is already acquired.
// It is an R-Lock (with the W-counterpart owned by the Group Policy service), // It is an R-Lock (with the W-counterpart owned by the Group Policy service),
// meaning that it can be acquired by multiple readers simultaneously. // meaning that it can be acquired by multiple readers simultaneously.
@ -160,20 +160,20 @@ func (l *PolicyLock) Lock() error {
return nil return nil
} }
return l.lockSlow() return lk.lockSlow()
} }
// lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock. // lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock.
// It waits for either the lock to be acquired, or for the Close method to be called. // It waits for either the lock to be acquired, or for the Close method to be called.
// //
// l.mu must be held. // lk.mu must be held.
func (l *PolicyLock) lockSlow() (err error) { func (lk *PolicyLock) lockSlow() (err error) {
defer func() { defer func() {
if err != nil { if err != nil {
// Decrement the counter if the lock cannot be acquired, // Decrement the counter if the lock cannot be acquired,
// and complete the pending close request if we're the last owner. // and complete the pending close request if we're the last owner.
if l.lockCnt.Add(-2) == 0 { if lk.lockCnt.Add(-2) == 0 {
l.closeInternal() lk.closeInternal()
} }
} }
}() }()
@ -190,12 +190,12 @@ func (l *PolicyLock) lockSlow() (err error) {
resultCh := make(chan policyLockResult) resultCh := make(chan policyLockResult)
go func() { go func() {
closing := l.closing closing := lk.closing
if l.scope == UserPolicy && l.token != 0 { if lk.scope == UserPolicy && lk.token != 0 {
// Impersonate the user whose critical policy section we want to acquire. // Impersonate the user whose critical policy section we want to acquire.
runtime.LockOSThread() runtime.LockOSThread()
defer runtime.UnlockOSThread() defer runtime.UnlockOSThread()
if err := impersonateLoggedOnUser(l.token); err != nil { if err := impersonateLoggedOnUser(lk.token); err != nil {
initCh <- err initCh <- err
return return
} }
@ -209,10 +209,10 @@ func (l *PolicyLock) lockSlow() (err error) {
close(initCh) close(initCh)
var machine bool var machine bool
if l.scope == MachinePolicy { if lk.scope == MachinePolicy {
machine = true machine = true
} }
handle, err := l.enterFn(machine) handle, err := lk.enterFn(machine)
send_result: send_result:
for { for {
@ -226,7 +226,7 @@ func (l *PolicyLock) lockSlow() (err error) {
// The lock is being closed, and we lost the race to l.closing // The lock is being closed, and we lost the race on lk.closing
// it the calling goroutine. // to the calling goroutine.
if err == nil { if err == nil {
l.leaveFn(handle) lk.leaveFn(handle)
} }
break send_result break send_result
default: default:
@ -247,21 +247,21 @@ func (l *PolicyLock) lockSlow() (err error) {
select { select {
case result := <-resultCh: case result := <-resultCh:
if result.err == nil { if result.err == nil {
l.handle = result.handle lk.handle = result.handle
} }
return result.err return result.err
case <-l.closing: case <-lk.closing:
return ErrInvalidLockState return ErrInvalidLockState
} }
} }
// Unlock unlocks l. // Unlock unlocks lk.
// It panics if l is not locked on entry to Unlock. // It panics if lk is not locked on entry to Unlock.
func (l *PolicyLock) Unlock() { func (lk *PolicyLock) Unlock() {
l.mu.Lock() lk.mu.Lock()
defer l.mu.Unlock() defer lk.mu.Unlock()
lockCnt := l.lockCnt.Add(-2) lockCnt := lk.lockCnt.Add(-2)
if lockCnt < 0 { if lockCnt < 0 {
panic("negative lockCnt") panic("negative lockCnt")
} }
@ -273,33 +273,33 @@ func (l *PolicyLock) Unlock() {
return return
} }
if l.handle != 0 { if lk.handle != 0 {
// Impersonation is not required to unlock a critical policy section. // Impersonation is not required to unlock a critical policy section.
// The handle we pass determines which mutex will be unlocked. // The handle we pass determines which mutex will be unlocked.
leaveCriticalPolicySection(l.handle) leaveCriticalPolicySection(lk.handle)
l.handle = 0 lk.handle = 0
} }
if lockCnt == 0 { if lockCnt == 0 {
// Complete the pending close request if there's no more readers. // Complete the pending close request if there's no more readers.
l.closeInternal() lk.closeInternal()
} }
} }
// Close releases resources associated with l. // Close releases resources associated with lk.
// It is a no-op for the machine policy lock. // It is a no-op for the machine policy lock.
func (l *PolicyLock) Close() error { func (lk *PolicyLock) Close() error {
lockCnt := l.lockCnt.Load() lockCnt := lk.lockCnt.Load()
if lockCnt&1 == 0 { if lockCnt&1 == 0 {
// The lock has never been initialized, or close has already been called. // The lock has never been initialized, or close has already been called.
return nil return nil
} }
close(l.closing) close(lk.closing)
// Unset the LSB to indicate a pending close request. // Unset the LSB to indicate a pending close request.
for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { for !lk.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) {
lockCnt = l.lockCnt.Load() lockCnt = lk.lockCnt.Load()
} }
if lockCnt != 0 { if lockCnt != 0 {
@ -307,16 +307,16 @@ func (l *PolicyLock) Close() error {
return nil return nil
} }
return l.closeInternal() return lk.closeInternal()
} }
func (l *PolicyLock) closeInternal() error { func (lk *PolicyLock) closeInternal() error {
if l.token != 0 { if lk.token != 0 {
if err := l.token.Close(); err != nil { if err := lk.token.Close(); err != nil {
return err return err
} }
l.token = 0 lk.token = 0
} }
l.closing = nil lk.closing = nil
return nil return nil
} }

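A usage sketch for the lock above (Windows-only; NewMachinePolicyLock appears in the tests earlier in this diff, and the read-settings step is a placeholder):

lock := NewMachinePolicyLock()
defer lock.Close()

if err := lock.Lock(); err != nil {
	return err // e.g. ErrLockRestricted or ErrInvalidLockState
}
defer lock.Unlock()
// Read Group Policy settings here; while this R-lock is held, the Group
// Policy service cannot take its W-counterpart and rewrite them mid-read.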
@ -256,8 +256,8 @@ func checkDomainAccount(username string) (sanitizedUserName string, isDomainAcco
// errors.Is to check for it. When capLevel == CapCreateProcess, the logon // errors.Is to check for it. When capLevel == CapCreateProcess, the logon
// enforces the user's logon hours policy (when present). // enforces the user's logon hours policy (when present).
func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) { func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) {
if l := len(srcName); l == 0 || l > _TOKEN_SOURCE_LENGTH { if ln := len(srcName); ln == 0 || ln > _TOKEN_SOURCE_LENGTH {
return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, l) return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, ln)
} }
if err := checkASCII(srcName); err != nil { if err := checkASCII(srcName); err != nil {
return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err) return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err)

@ -938,10 +938,10 @@ func mergeEnv(existingEnv []string, extraEnv map[string]string) []string {
result = append(result, strings.Join([]string{k, v}, "=")) result = append(result, strings.Join([]string{k, v}, "="))
} }
slices.SortFunc(result, func(l, r string) int { slices.SortFunc(result, func(a, b string) int {
kl, _, _ := strings.Cut(l, "=") ka, _, _ := strings.Cut(a, "=")
kr, _, _ := strings.Cut(r, "=") kb, _, _ := strings.Cut(b, "=")
return strings.Compare(kl, kr) return strings.Compare(ka, kb)
}) })
return result return result
} }

@ -83,8 +83,8 @@ func (sib *StartupInfoBuilder) Resolve() (startupInfo *windows.StartupInfo, inhe
// Always create a Unicode environment. // Always create a Unicode environment.
createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT
if l := uint32(len(sib.attrs)); l > 0 { if ln := uint32(len(sib.attrs)); ln > 0 {
attrCont, err := windows.NewProcThreadAttributeList(l) attrCont, err := windows.NewProcThreadAttributeList(ln)
if err != nil { if err != nil {
return nil, false, 0, err return nil, false, 0, err
} }

@ -68,8 +68,8 @@ func checkContiguousBuffer[T any, BU BufUnit](t *testing.T, extra []BU, pt *T, p
if gotLen := int(ptLen); gotLen != expectedLen { if gotLen := int(ptLen); gotLen != expectedLen {
t.Errorf("allocation length got %d, want %d", gotLen, expectedLen) t.Errorf("allocation length got %d, want %d", gotLen, expectedLen)
} }
if l := len(slcs); l != 1 { if ln := len(slcs); ln != 1 {
t.Errorf("len(slcs) got %d, want 1", l) t.Errorf("len(slcs) got %d, want 1", ln)
} }
if len(extra) == 0 && slcs[0] != nil { if len(extra) == 0 && slcs[0] != nil {
t.Error("slcs[0] got non-nil, want nil") t.Error("slcs[0] got non-nil, want nil")

@ -66,8 +66,8 @@ func (p protocol) getLayers(d direction) []wf.LayerID {
return layers return layers
} }
func ruleName(action wf.Action, l wf.LayerID, name string) string { func ruleName(action wf.Action, layerID wf.LayerID, name string) string {
switch l { switch layerID {
case wf.LayerALEAuthConnectV4: case wf.LayerALEAuthConnectV4:
return fmt.Sprintf("%s outbound %s (IPv4)", action, name) return fmt.Sprintf("%s outbound %s (IPv4)", action, name)
case wf.LayerALEAuthConnectV6: case wf.LayerALEAuthConnectV6:
@ -307,8 +307,8 @@ func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions [
func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) { func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) {
var rules []*wf.Rule var rules []*wf.Rule
for _, l := range p.getLayers(d) { for _, layer := range p.getLayers(d) {
r, err := f.newRule(name, w, l, conditions, action) r, err := f.newRule(name, w, layer, conditions, action)
if err != nil { if err != nil {
return nil, err return nil, err
} }

@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) {
} }
} }
func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { func runDERPAndStun(t *testing.T, logf logger.Logf, ln nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
d := derpserver.New(key.NewNode(), logf) d := derpserver.New(key.NewNode(), logf)
httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
@ -119,7 +119,7 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st
httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
httpsrv.StartTLS() httpsrv.StartTLS()
stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l) stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, ln)
m := &tailcfg.DERPMap{ m := &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{ Regions: map[int]*tailcfg.DERPRegion{
@ -172,12 +172,12 @@ type magicStack struct {
// newMagicStack builds and initializes an idle magicsock and // newMagicStack builds and initializes an idle magicsock and
// friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig // friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig
// before anything interesting happens. // before anything interesting happens.
func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack {
privateKey := key.NewNode() privateKey := key.NewNode()
return newMagicStackWithKey(t, logf, l, derpMap, privateKey) return newMagicStackWithKey(t, logf, ln, derpMap, privateKey)
} }
func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack {
t.Helper() t.Helper()
bus := eventbustest.NewBus(t) bus := eventbustest.NewBus(t)
@ -197,7 +197,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
Logf: logf, Logf: logf,
HealthTracker: ht, HealthTracker: ht,
DisablePortMapper: true, DisablePortMapper: true,
TestOnlyPacketListener: l, TestOnlyPacketListener: ln,
EndpointsFunc: func(eps []tailcfg.Endpoint) { EndpointsFunc: func(eps []tailcfg.Endpoint) {
epCh <- eps epCh <- eps
}, },
@ -687,13 +687,13 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri
func TestTwoDevicePing(t *testing.T) { func TestTwoDevicePing(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762") flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762")
l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) ln, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1)
n := &devices{ n := &devices{
m1: l, m1: ln,
m1IP: ip, m1IP: ip,
m2: l, m2: ln,
m2IP: ip, m2IP: ip,
stun: l, stun: ln,
stunIP: ip, stunIP: ip,
} }
testTwoDevicePing(t, n) testTwoDevicePing(t, n)

@ -126,24 +126,24 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported
return le return le
} }
// gro attempts to enqueue p on g if l supports a GRO kind matching the // gro attempts to enqueue p on g if ep supports a GRO kind matching the
// transport protocol carried in p. gro may allocate g if it is nil. gro can // transport protocol carried in p. gro may allocate g if it is nil. gro can
// either return the existing g, a newly allocated one, or nil. Callers are // either return the existing g, a newly allocated one, or nil. Callers are
// responsible for calling Flush() on the returned value if it is non-nil once // responsible for calling Flush() on the returned value if it is non-nil once
// they have finished iterating through all GRO candidates for a given vector. // they have finished iterating through all GRO candidates for a given vector.
// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via // If gro allocates a *gro.GRO it will have ep's stack.NetworkDispatcher set via
// SetDispatcher(). // SetDispatcher().
func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { func (ep *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO {
if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { if !buildfeatures.HasGRO || ep.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP {
// IPv6 may have extension headers preceding a TCP header, but we trade // IPv6 may have extension headers preceding a TCP header, but we trade
// for a fast path and assume p cannot be coalesced in such a case. // for a fast path and assume p cannot be coalesced in such a case.
l.injectInbound(p) ep.injectInbound(p)
return g return g
} }
if g == nil { if g == nil {
l.mu.RLock() ep.mu.RLock()
d := l.dispatcher d := ep.dispatcher
l.mu.RUnlock() ep.mu.RUnlock()
g = gro.NewGRO() g = gro.NewGRO()
g.SetDispatcher(d) g.SetDispatcher(d)
} }
@ -154,39 +154,39 @@ func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO {
// Close closes l. Further packet injections will return an error, and all // Close closes ep. Further packet injections will return an error, and all
// pending packets are discarded. Close may be called concurrently with // pending packets are discarded. Close may be called concurrently with
// WritePackets. // WritePackets.
func (l *linkEndpoint) Close() { func (ep *linkEndpoint) Close() {
l.mu.Lock() ep.mu.Lock()
l.dispatcher = nil ep.dispatcher = nil
l.mu.Unlock() ep.mu.Unlock()
l.q.Close() ep.q.Close()
l.Drain() ep.Drain()
} }
// Read does non-blocking read one packet from the outbound packet queue. // Read does non-blocking read one packet from the outbound packet queue.
func (l *linkEndpoint) Read() *stack.PacketBuffer { func (ep *linkEndpoint) Read() *stack.PacketBuffer {
return l.q.Read() return ep.q.Read()
} }
// ReadContext does blocking read for one packet from the outbound packet queue. // ReadContext does blocking read for one packet from the outbound packet queue.
// It can be cancelled by ctx, and in this case, it returns nil. // It can be cancelled by ctx, and in this case, it returns nil.
func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { func (ep *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {
return l.q.ReadContext(ctx) return ep.q.ReadContext(ctx)
} }
// Drain removes all outbound packets from the channel and counts them. // Drain removes all outbound packets from the channel and counts them.
func (l *linkEndpoint) Drain() int { func (ep *linkEndpoint) Drain() int {
return l.q.Drain() return ep.q.Drain()
} }
// NumQueued returns the number of packets queued for outbound. // NumQueued returns the number of packets queued for outbound.
func (l *linkEndpoint) NumQueued() int { func (ep *linkEndpoint) NumQueued() int {
return l.q.Num() return ep.q.Num()
} }
func (l *linkEndpoint) injectInbound(p *packet.Parsed) { func (ep *linkEndpoint) injectInbound(p *packet.Parsed) {
l.mu.RLock() ep.mu.RLock()
d := l.dispatcher d := ep.dispatcher
l.mu.RUnlock() ep.mu.RUnlock()
if d == nil || !buildfeatures.HasNetstack { if d == nil || !buildfeatures.HasNetstack {
return return
} }
@ -200,35 +200,35 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) {
// Attach saves the stack network-layer dispatcher for use later when packets // Attach saves the stack network-layer dispatcher for use later when packets
// are injected. // are injected.
func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
l.mu.Lock() ep.mu.Lock()
defer l.mu.Unlock() defer ep.mu.Unlock()
l.dispatcher = dispatcher ep.dispatcher = dispatcher
} }
// IsAttached implements stack.LinkEndpoint.IsAttached. // IsAttached implements stack.LinkEndpoint.IsAttached.
func (l *linkEndpoint) IsAttached() bool { func (ep *linkEndpoint) IsAttached() bool {
l.mu.RLock() ep.mu.RLock()
defer l.mu.RUnlock() defer ep.mu.RUnlock()
return l.dispatcher != nil return ep.dispatcher != nil
} }
// MTU implements stack.LinkEndpoint.MTU. // MTU implements stack.LinkEndpoint.MTU.
func (l *linkEndpoint) MTU() uint32 { func (ep *linkEndpoint) MTU() uint32 {
l.mu.RLock() ep.mu.RLock()
defer l.mu.RUnlock() defer ep.mu.RUnlock()
return l.mtu return ep.mtu
} }
// SetMTU implements stack.LinkEndpoint.SetMTU. // SetMTU implements stack.LinkEndpoint.SetMTU.
func (l *linkEndpoint) SetMTU(mtu uint32) { func (ep *linkEndpoint) SetMTU(mtu uint32) {
l.mu.Lock() ep.mu.Lock()
defer l.mu.Unlock() defer ep.mu.Unlock()
l.mtu = mtu ep.mtu = mtu
} }
// Capabilities implements stack.LinkEndpoint.Capabilities. // Capabilities implements stack.LinkEndpoint.Capabilities.
func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { func (ep *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities {
// We are required to offload RX checksum validation for the purposes of // We are required to offload RX checksum validation for the purposes of
// GRO. // GRO.
return stack.CapabilityRXChecksumOffload return stack.CapabilityRXChecksumOffload
@ -242,8 +242,8 @@ func (*linkEndpoint) GSOMaxSize() uint32 {
} }
// SupportedGSO implements stack.GSOEndpoint. // SupportedGSO implements stack.GSOEndpoint.
func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO { func (ep *linkEndpoint) SupportedGSO() stack.SupportedGSO {
return l.SupportedGSOKind return ep.SupportedGSOKind
} }
// MaxHeaderLength returns the maximum size of the link layer header. Given it // MaxHeaderLength returns the maximum size of the link layer header. Given it
@ -253,22 +253,22 @@ func (*linkEndpoint) MaxHeaderLength() uint16 {
} }
// LinkAddress returns the link address of this endpoint. // LinkAddress returns the link address of this endpoint.
func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress { func (ep *linkEndpoint) LinkAddress() tcpip.LinkAddress {
l.mu.RLock() ep.mu.RLock()
defer l.mu.RUnlock() defer ep.mu.RUnlock()
return l.linkAddr return ep.linkAddr
} }
// SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress. // SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress.
func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { func (ep *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) {
l.mu.Lock() ep.mu.Lock()
defer l.mu.Unlock() defer ep.mu.Unlock()
l.linkAddr = addr ep.linkAddr = addr
} }
// WritePackets stores outbound packets into the channel. // WritePackets stores outbound packets into the channel.
// Multiple concurrent calls are permitted. // Multiple concurrent calls are permitted.
func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { func (ep *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
n := 0 n := 0
// TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a // TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a
// single packet. We can split 2 x 64K GSO across // single packet. We can split 2 x 64K GSO across
@ -278,7 +278,7 @@ func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err
// control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to // control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to
// ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage). // ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage).
for _, pkt := range pkts.AsSlice() { for _, pkt := range pkts.AsSlice() {
if err := l.q.Write(pkt); err != nil { if err := ep.q.Write(pkt); err != nil {
if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 { if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 {
return 0, err return 0, err
} }

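The calling pattern the gro doc comment describes, as if in the same package (parsedBatch is illustrative): g threads through the whole vector and is flushed exactly once at the end.

var g *gro.GRO
for _, p := range parsedBatch {
	g = ep.gro(p, g) // may lazily allocate g for the first TCP candidate
}
if g != nil {
	g.Flush() // dispatch everything coalesced for this vector into the stack
}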
@ -870,7 +870,7 @@ func (o *fakeOS) run(args ...string) error {
rest = family + " " + strings.Join(args[3:], " ") rest = family + " " + strings.Join(args[3:], " ")
} }
var l *[]string var ls *[]string
switch args[1] { switch args[1] {
case "link": case "link":
got := strings.Join(args[2:], " ") got := strings.Join(args[2:], " ")
@ -884,31 +884,31 @@ func (o *fakeOS) run(args ...string) error {
} }
return nil return nil
case "addr": case "addr":
l = &o.ips ls = &o.ips
case "route": case "route":
l = &o.routes ls = &o.routes
case "rule": case "rule":
l = &o.rules ls = &o.rules
default: default:
return unexpected() return unexpected()
} }
switch args[2] { switch args[2] {
case "add": case "add":
for _, el := range *l { for _, el := range *ls {
if el == rest { if el == rest {
o.t.Errorf("can't add %q, already present", rest) o.t.Errorf("can't add %q, already present", rest)
return errors.New("already exists") return errors.New("already exists")
} }
} }
*l = append(*l, rest) *ls = append(*ls, rest)
sort.Strings(*l) sort.Strings(*ls)
case "del": case "del":
found := false found := false
for i, el := range *l { for i, el := range *ls {
if el == rest { if el == rest {
found = true found = true
*l = append((*l)[:i], (*l)[i+1:]...) *ls = append((*ls)[:i], (*ls)[i+1:]...)
break break
} }
} }
